models
Definitions of all models available in Transformers.js.
Example: Load and run an `AutoModel`.

```javascript
import { AutoModel, AutoTokenizer } from '@huggingface/transformers';

let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');
let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased');

let inputs = await tokenizer('I love transformers!');
let { logits } = await model(inputs);
// Tensor {
//     data: Float32Array(183132) [-7.117443084716797, -7.107812881469727, -7.092104911804199, ...]
//     dims: (3) [1, 6, 30522],
//     type: "float32",
//     size: 183132,
// }
```
We also provide other `AutoModel`s (listed below), which you can use in the same way as the Python library. For example:

Example: Load and run an `AutoModelForSeq2SeqLM`.

```javascript
import { AutoModelForSeq2SeqLM, AutoTokenizer } from '@huggingface/transformers';

let tokenizer = await AutoTokenizer.from_pretrained('Xenova/t5-small');
let model = await AutoModelForSeq2SeqLM.from_pretrained('Xenova/t5-small');

let { input_ids } = await tokenizer('translate English to German: I love transformers!');
let outputs = await model.generate(input_ids);
let decoded = tokenizer.decode(outputs[0], { skip_special_tokens: true });
// 'Ich liebe Transformatoren!'
```
- models
  - static
    - .PreTrainedModel
      - new PreTrainedModel(config, sessions, configs)
      - instance
        - .custom_config : *
        - .generation_config ⇒ GenerationConfig | null
        - .dispose() ⇒ Promise.<Array<unknown>>
        - ._call(model_inputs) ⇒ Promise.<Object>
        - .forward(model_inputs) ⇒ Promise.<Object>
        - ._get_logits_warper(generation_config) ⇒ LogitsProcessorList
        - ._prepare_generation_config(generation_config, kwargs) ⇒ GenerationConfig
        - ._get_stopping_criteria(generation_config, [stopping_criteria])
        - ._validate_model_class()
        - ._update_model_kwargs_for_generation(inputs) ⇒ Object
        - ._prepare_model_inputs(params) ⇒ Object
        - ._prepare_decoder_input_ids_for_generation(param0)
        - .generate(options) ⇒ Promise.<(ModelOutput|Tensor)>
        - .getPastKeyValues(decoderResults, pastKeyValues) ⇒ Object
        - .getAttentions(model_output) ⇒ *
        - .addPastKeyValues(decoderFeeds, pastKeyValues)
      - static
        - .from_pretrained(pretrained_model_name_or_path, options) ⇒ Promise.<PreTrainedModel>
    - .BaseModelOutput
    - .BertForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .BertForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .BertForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .BertForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .RoFormerModel
    - .RoFormerForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .RoFormerForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .RoFormerForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .RoFormerForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .ConvBertModel
    - .ConvBertForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .ConvBertForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .ConvBertForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .ConvBertForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .ElectraModel
    - .ElectraForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .ElectraForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .ElectraForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .ElectraForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .CamembertModel
    - .CamembertForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .CamembertForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .CamembertForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .CamembertForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .DebertaModel
    - .DebertaForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .DebertaForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .DebertaForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .DebertaForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .DebertaV2Model
    - .DebertaV2ForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .DebertaV2ForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .DebertaV2ForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .DebertaV2ForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .DistilBertForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .DistilBertForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .DistilBertForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .DistilBertForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .EsmModel
    - .EsmForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .EsmForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .EsmForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .MobileBertForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .MobileBertForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .MobileBertForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .MPNetModel
    - .MPNetForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .MPNetForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .MPNetForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .MPNetForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .T5ForConditionalGeneration
    - .LongT5PreTrainedModel
    - .LongT5Model
    - .LongT5ForConditionalGeneration
    - .MT5ForConditionalGeneration
    - .BartModel
    - .BartForConditionalGeneration
    - .BartForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .MBartModel
    - .MBartForConditionalGeneration
    - .MBartForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .BlenderbotModel
    - .BlenderbotForConditionalGeneration
    - .BlenderbotSmallModel
    - .BlenderbotSmallForConditionalGeneration
    - .RobertaForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .RobertaForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .RobertaForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .RobertaForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .XLMPreTrainedModel
    - .XLMModel
    - .XLMWithLMHeadModel
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .XLMForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .XLMForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .XLMForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .XLMRobertaForMaskedLM
      - ._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
    - .XLMRobertaForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .XLMRobertaForTokenClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .XLMRobertaForQuestionAnswering
      - ._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
    - .ASTModel
    - .ASTForAudioClassification
    - .WhisperModel
    - .WhisperForConditionalGeneration
      - ._retrieve_init_tokens(generation_config)
      - .generate(options) ⇒ Promise.<(ModelOutput|Tensor)>
      - ._extract_token_timestamps(generate_outputs, alignment_heads, [num_frames], [time_precision]) ⇒ Tensor
    - .MoonshineModel
    - .VisionEncoderDecoderModel
    - .LlavaForConditionalGeneration
    - .Idefics3ForConditionalGeneration
    - .CLIPModel
    - .CLIPTextModel
    - .CLIPTextModelWithProjection
    - .CLIPVisionModel
    - .CLIPVisionModelWithProjection
    - .SiglipModel
    - .SiglipTextModel
    - .SiglipVisionModel
    - .CLIPSegForImageSegmentation
    - .GPT2LMHeadModel
    - .JAISModel
    - .JAISLMHeadModel
    - .CodeGenModel
    - .CodeGenForCausalLM
    - .LlamaPreTrainedModel
    - .LlamaModel
    - .CoherePreTrainedModel
    - .GemmaPreTrainedModel
    - .GemmaModel
    - .Gemma2PreTrainedModel
    - .Gemma2Model
    - .Qwen2PreTrainedModel
    - .Qwen2Model
    - .PhiModel
    - .Phi3Model
    - .BloomPreTrainedModel
    - .BloomModel
    - .BloomForCausalLM
    - .MptModel
    - .MptForCausalLM
    - .OPTModel
    - .OPTForCausalLM
    - .VitPoseForPoseEstimation
    - .VitMatteForImageMatting
    - .DetrObjectDetectionOutput
    - .DetrSegmentationOutput
    - .RTDetrObjectDetectionOutput
    - .TableTransformerModel
    - .TableTransformerForObjectDetection
    - .ResNetPreTrainedModel
    - .ResNetModel
    - .ResNetForImageClassification
    - .Swin2SRModel
    - .Swin2SRForImageSuperResolution
    - .DPTModel
    - .DPTForDepthEstimation
    - .DepthAnythingForDepthEstimation
    - .GLPNModel
    - .GLPNForDepthEstimation
    - .DonutSwinModel
    - .ConvNextModel
    - .ConvNextForImageClassification
    - .ConvNextV2Model
    - .ConvNextV2ForImageClassification
    - .Dinov2Model
    - .Dinov2ForImageClassification
    - .Dinov2WithRegistersModel
    - .Dinov2WithRegistersForImageClassification
    - .YolosObjectDetectionOutput
    - .SamModel
      - .get_image_embeddings(model_inputs) ⇒ Promise.<{image_embeddings: Tensor, image_positional_embeddings: Tensor}>
      - .forward(model_inputs) ⇒ Promise.<Object>
      - ._call(model_inputs) ⇒ Promise.<SamImageSegmentationOutput>
    - .SamImageSegmentationOutput
    - .Wav2Vec2Model
    - .Wav2Vec2ForAudioFrameClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .PyAnnoteModel
    - .PyAnnoteForAudioFrameClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .UniSpeechModel
    - .UniSpeechForCTC
    - .UniSpeechForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .UniSpeechSatModel
    - .UniSpeechSatForCTC
    - .UniSpeechSatForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .UniSpeechSatForAudioFrameClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .Wav2Vec2BertModel
    - .Wav2Vec2BertForCTC
    - .Wav2Vec2BertForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .HubertModel
    - .HubertForCTC
    - .HubertForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .WavLMPreTrainedModel
    - .WavLMModel
    - .WavLMForCTC
    - .WavLMForSequenceClassification
      - ._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
    - .WavLMForXVector
      - ._call(model_inputs) ⇒ Promise.<XVectorOutput>
    - .WavLMForAudioFrameClassification
      - ._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
    - .SpeechT5PreTrainedModel
    - .SpeechT5Model
    - .SpeechT5ForSpeechToText
    - .SpeechT5ForTextToSpeech
      - .generate_speech(input_values, speaker_embeddings, options) ⇒ Promise.<SpeechOutput>
    - .SpeechT5HifiGan
    - .TrOCRForCausalLM
    - .MistralPreTrainedModel
    - .Starcoder2PreTrainedModel
    - .FalconPreTrainedModel
    - .ClapTextModelWithProjection
    - .ClapAudioModelWithProjection
    - .VitsModel
      - ._call(model_inputs) ⇒ Promise.<VitsModelOutput>
    - .SegformerModel
    - .SegformerForImageClassification
    - .SegformerForSemanticSegmentation
    - .StableLmModel
    - .StableLmForCausalLM
    - .EfficientNetModel
    - .EfficientNetForImageClassification
    - .MusicgenModel
    - .MusicgenForCausalLM
    - .MusicgenForConditionalGeneration
      - ._apply_and_filter_by_delay_pattern_mask(outputs) ⇒ Tensor
      - .generate(options) ⇒ Promise.<(ModelOutput|Tensor)>
    - .MobileNetV1Model
    - .MobileNetV1ForImageClassification
    - .MobileNetV2Model
    - .MobileNetV2ForImageClassification
    - .MobileNetV3Model
    - .MobileNetV3ForImageClassification
    - .MobileNetV4Model
    - .MobileNetV4ForImageClassification
    - .DecisionTransformerModel
    - .MgpstrForSceneTextRecognition
    - .PatchTSTModel
    - .PatchTSTForPrediction
    - .PatchTSMixerModel
    - .PatchTSMixerForPrediction
    - .PretrainedMixin
      - instance
      - static
    - .AutoModel
    - .AutoModelForSequenceClassification
    - .AutoModelForTokenClassification
    - .AutoModelForSeq2SeqLM
    - .AutoModelForSpeechSeq2Seq
    - .AutoModelForTextToSpectrogram
    - .AutoModelForTextToWaveform
    - .AutoModelForCausalLM
    - .AutoModelForMaskedLM
    - .AutoModelForQuestionAnswering
    - .AutoModelForVision2Seq
    - .AutoModelForImageClassification
    - .AutoModelForImageSegmentation
    - .AutoModelForSemanticSegmentation
    - .AutoModelForUniversalSegmentation
    - .AutoModelForObjectDetection
    - .AutoModelForMaskGeneration
    - .Seq2SeqLMOutput
    - .SequenceClassifierOutput
    - .XVectorOutput
    - .TokenClassifierOutput
    - .MaskedLMOutput
    - .QuestionAnsweringModelOutput
    - .CausalLMOutput
    - .CausalLMOutputWithPast
    - .ImageMattingOutput
    - .VitsModelOutput
  - inner
    - ~cumsum_masked_fill(attention_mask) ⇒ Object
    - ~createPositionIds()
    - ~SamModelInputs : Object
    - ~SpeechOutput : Object
models.PreTrainedModel
A base class for pre-trained models that provides the model configuration and an ONNX session.
Kind: static class of models
- .PreTrainedModel
  - new PreTrainedModel(config, sessions, configs)
  - instance
    - .custom_config : *
    - .generation_config ⇒ GenerationConfig | null
    - .dispose() ⇒ Promise.<Array<unknown>>
    - ._call(model_inputs) ⇒ Promise.<Object>
    - .forward(model_inputs) ⇒ Promise.<Object>
    - ._get_logits_warper(generation_config) ⇒ LogitsProcessorList
    - ._prepare_generation_config(generation_config, kwargs) ⇒ GenerationConfig
    - ._get_stopping_criteria(generation_config, [stopping_criteria])
    - ._validate_model_class()
    - ._update_model_kwargs_for_generation(inputs) ⇒ Object
    - ._prepare_model_inputs(params) ⇒ Object
    - ._prepare_decoder_input_ids_for_generation(param0)
    - .generate(options) ⇒ Promise.<(ModelOutput|Tensor)>
    - .getPastKeyValues(decoderResults, pastKeyValues) ⇒ Object
    - .getAttentions(model_output) ⇒ *
    - .addPastKeyValues(decoderFeeds, pastKeyValues)
  - static
    - .from_pretrained(pretrained_model_name_or_path, options) ⇒ Promise.<PreTrainedModel>
new PreTrainedModel(config, sessions, configs)
Creates a new instance of the PreTrainedModel class.
Param | Type | Description |
---|---|---|
config | * | The model configuration. |
sessions | Record.<string, any> | The inference sessions for the model. |
configs | Record.<string, Object> | Additional configuration files (e.g., generation_config.json). |
preTrainedModel.custom_config : *
Kind: instance property of PreTrainedModel
preTrainedModel.generation_config ⇒ GenerationConfig | null
Get the model's generation config, if it exists.
Kind: instance property of PreTrainedModel
Returns: GenerationConfig | null - The model's generation config if it exists, otherwise null.
preTrainedModel.dispose() ⇒ Promise.<Array<unknown>>
Disposes of all the ONNX sessions that were created during inference.
Kind: instance method of PreTrainedModel
Returns: Promise.<Array<unknown>> - An array of promises, one for each ONNX session that is being disposed.
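For example, a minimal sketch of releasing a model's resources once you are finished with it (the checkpoint name is just an illustration):

```javascript
import { AutoModel } from '@huggingface/transformers';

let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased');
// ... run inference ...

// Release the underlying ONNX sessions once the model is no longer needed.
await model.dispose();
```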
preTrainedModel._call(model_inputs) ⇒ Promise.<Object>
Runs the model with the provided inputs.
Kind: instance method of PreTrainedModel
Returns: Promise.<Object> - Object containing output tensors
Param | Type | Description |
---|---|---|
model_inputs | Object | Object containing input tensors |
preTrainedModel.forward(model_inputs) ⇒ Promise.<Object>
Forward method for a pretrained model. If not overridden by a subclass, the correct forward method will be chosen based on the model type.
Kind: instance method of PreTrainedModel
Returns: Promise.<Object> - The output data from the model in the format specified in the ONNX model.
Throws: Error - This method must be implemented in subclasses.
Param | Type | Description |
---|---|---|
model_inputs | Object | The input data to the model in the format specified in the ONNX model. |
preTrainedModel._get_logits_warper(generation_config) ⇒ LogitsProcessorList
This function returns a LogitsProcessorList object that contains all relevant LogitsWarper instances used for multinomial sampling.
Kind: instance method of PreTrainedModel
Returns: LogitsProcessorList - generation_config
Param | Type | Description |
---|---|---|
generation_config | GenerationConfig | The generation config. |
preTrainedModel._prepare_generation_config(generation_config, kwargs) ⇒ GenerationConfig
This function merges multiple generation configs together to form a final generation config to be used by the model for text generation. It first creates an empty GenerationConfig object, then applies the model's own generation_config property to it. Finally, if a generation_config object was passed in the arguments, it overwrites the corresponding properties in the final config with those of the passed config object.
Kind: instance method of PreTrainedModel
Returns: GenerationConfig - The final generation config object to be used by the model for text generation.
Param | Type | Description |
---|---|---|
generation_config | GenerationConfig | null | A GenerationConfig object containing generation parameters. |
kwargs | Object | Additional generation parameters to be used in place of those in the generation_config object. |
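The precedence described above can be pictured with plain objects. This is an illustrative sketch only, not the actual implementation, and all parameter values below are made up:

```javascript
// Later spreads overwrite earlier ones, mirroring the merge order:
// library defaults < model's generation_config.json < passed config < kwargs
const libraryDefaults = { max_length: 20, do_sample: false };
const modelGenerationConfig = { max_length: 128 };  // from generation_config.json
const passedGenerationConfig = { do_sample: true }; // passed to the method
const kwargs = { top_k: 50 };                       // individual overrides

const finalConfig = {
  ...libraryDefaults,
  ...modelGenerationConfig,
  ...passedGenerationConfig,
  ...kwargs,
};
// { max_length: 128, do_sample: true, top_k: 50 }
```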
preTrainedModel._get_stopping_criteria(generation_config, [stopping_criteria])
Kind: instance method of PreTrainedModel
Param | Type | Default |
---|---|---|
generation_config | GenerationConfig | |
[stopping_criteria] | StoppingCriteriaList | |
preTrainedModel._validate_model_class()
Confirms that the model class is compatible with generation. If not, raises an exception that points to the right class to use.
Kind: instance method of PreTrainedModel
preTrainedModel._update_model_kwargs_for_generation(inputs) ⇒ Object
Kind: instance method of PreTrainedModel
Returns: Object - The updated model inputs for the next generation iteration.
Param | Type |
---|---|
inputs | Object |
inputs.generated_input_ids | Array.<Array<bigint>> |
inputs.outputs | Object |
inputs.model_inputs | Object |
inputs.is_encoder_decoder | boolean |
preTrainedModel._prepare_model_inputs(params) ⇒ Object
This function extracts the model-specific inputs for generation.
Kind: instance method of PreTrainedModel
Returns: Object - The model-specific inputs for generation.
Param | Type | Default |
---|---|---|
params | Object | |
[params.inputs] | Tensor | |
[params.bos_token_id] | number | |
[params.model_kwargs] | Record.<string, (Tensor|Array<number>)> | |
preTrainedModel._prepare_decoder_input_ids_for_generation(param0)
Prepares decoder_input_ids for generation with encoder-decoder models.
Kind: instance method of PreTrainedModel
Param | Type |
---|---|
param0 | * |
preTrainedModel.generate(options) ⇒ Promise.<(ModelOutput|Tensor)>
Generates sequences of token ids for models with a language modeling head.
Kind: instance method of PreTrainedModel
Returns: Promise.<(ModelOutput|Tensor)> - The output of the model, which can contain the generated token ids, attentions, and scores.
Param | Type |
---|---|
options | * |
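For instance, a minimal text-generation sketch (assuming the Xenova/gpt2 checkpoint; the generation options shown are illustrative):

```javascript
import { AutoModelForCausalLM, AutoTokenizer } from '@huggingface/transformers';

let tokenizer = await AutoTokenizer.from_pretrained('Xenova/gpt2');
let model = await AutoModelForCausalLM.from_pretrained('Xenova/gpt2');

let inputs = await tokenizer('Once upon a time,');
let output_ids = await model.generate({
  ...inputs,
  max_new_tokens: 20, // illustrative generation parameter
});
console.log(tokenizer.batch_decode(output_ids, { skip_special_tokens: true }));
```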
preTrainedModel.getPastKeyValues(decoderResults, pastKeyValues) ⇒ Object
Returns an object containing past key values from the given decoder results object.
Kind: instance method of PreTrainedModel
Returns: Object - An object containing past key values.
Param | Type | Description |
---|---|---|
decoderResults | Object | The decoder results object. |
pastKeyValues | Object | The previous past key values. |
preTrainedModel.getAttentions(model_output) ⇒ *
Returns an object containing attentions from the given model output object.
Kind: instance method of PreTrainedModel
Returns: * - An object containing attentions.
Param | Type | Description |
---|---|---|
model_output | Object | The output of the model. |
preTrainedModel.addPastKeyValues(decoderFeeds, pastKeyValues)
Adds past key values to the decoder feeds object. If pastKeyValues is null, creates new tensors for past key values.
Kind: instance method of PreTrainedModel
Param | Type | Description |
---|---|---|
decoderFeeds | Object | The decoder feeds object to add past key values to. |
pastKeyValues | Object | An object containing past key values. |
PreTrainedModel.from_pretrained(pretrained_model_name_or_path, options) ⇒ Promise.<PreTrainedModel>
Instantiate one of the model classes of the library from a pretrained model.
The model class to instantiate is selected based on the model_type property of the config object (either passed as an argument or loaded from pretrained_model_name_or_path if possible).
Kind: static method of PreTrainedModel
Returns: Promise.<PreTrainedModel> - A new instance of the PreTrainedModel class.
Param | Type | Description |
---|---|---|
pretrained_model_name_or_path | string | The name or path of the pretrained model. Can be either the model id of a pretrained model hosted on the Hugging Face Hub, or a path to a directory containing model files. |
options | * | Additional options for loading the model. |
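For example, a sketch of loading a model with some common options (the option values shown are illustrative; which ones apply depends on your model and environment):

```javascript
import { AutoModel } from '@huggingface/transformers';

let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased', {
  dtype: 'q8',      // quantized weights
  device: 'webgpu', // run on WebGPU where available
});
```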
models.BaseModelOutput
Base class for model's outputs, with potential hidden states and attentions.
Kind: static class of models
new BaseModelOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.last_hidden_state | Tensor | Sequence of hidden-states at the output of the last layer of the model. |
[output.hidden_states] | Tensor | Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. |
[output.attentions] | Tensor | Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. |
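For instance, a base model's output can be inspected like this (a sketch assuming the Xenova/all-MiniLM-L6-v2 checkpoint, whose outputs include last_hidden_state):

```javascript
import { AutoModel, AutoTokenizer } from '@huggingface/transformers';

let tokenizer = await AutoTokenizer.from_pretrained('Xenova/all-MiniLM-L6-v2');
let model = await AutoModel.from_pretrained('Xenova/all-MiniLM-L6-v2');

let inputs = await tokenizer('Hello world!');
let { last_hidden_state } = await model(inputs);
console.log(last_hidden_state.dims); // e.g. [1, 5, 384] — [batch, sequence length, hidden size]
```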
models.BertForMaskedLM
BertForMaskedLM is a class representing a BERT model for masked language modeling.
Kind: static class of models
bertForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of BertForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
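For example, a minimal masked-language-modeling sketch (the interpretation comment describes the typical output; shapes depend on the input):

```javascript
import { AutoTokenizer, BertForMaskedLM } from '@huggingface/transformers';

let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');
let model = await BertForMaskedLM.from_pretrained('Xenova/bert-base-uncased');

let inputs = await tokenizer('The goal of life is [MASK].');
let { logits } = await model(inputs);
// logits holds one row of vocabulary scores per input token; the highest-scoring
// entry at the [MASK] position is the model's suggested fill.
```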
models.BertForSequenceClassification
BertForSequenceClassification is a class representing a BERT model for sequence classification.
Kind: static class of models
bertForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of BertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
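A sketch of direct usage, assuming a BERT checkpoint fine-tuned for classification such as Xenova/toxic-bert:

```javascript
import { AutoTokenizer, BertForSequenceClassification } from '@huggingface/transformers';

let tokenizer = await AutoTokenizer.from_pretrained('Xenova/toxic-bert');
let model = await BertForSequenceClassification.from_pretrained('Xenova/toxic-bert');

let inputs = await tokenizer('You are amazing!');
let { logits } = await model(inputs);
// One score per label; model.config.id2label maps indices to label names.
```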
models.BertForTokenClassification
BertForTokenClassification is a class representing a BERT model for token classification.
Kind: static class of models
bertForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of BertForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.BertForQuestionAnswering
BertForQuestionAnswering is a class representing a BERT model for question answering.
Kind: static class of models
bertForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of BertForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
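A sketch of extractive question answering; 'your-org/bert-finetuned-squad' is a placeholder for any SQuAD-style BERT checkpoint exported to ONNX:

```javascript
import { AutoTokenizer, BertForQuestionAnswering } from '@huggingface/transformers';

// Placeholder checkpoint name — substitute a real SQuAD-finetuned BERT model.
let tokenizer = await AutoTokenizer.from_pretrained('your-org/bert-finetuned-squad');
let model = await BertForQuestionAnswering.from_pretrained('your-org/bert-finetuned-squad');

let question = 'Who was Jim Henson?';
let context = 'Jim Henson was a nice puppet.';
let inputs = await tokenizer(question, { text_pair: context });

let { start_logits, end_logits } = await model(inputs);
// The argmax of start_logits and end_logits marks the answer span in the context.
```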
models.RoFormerModel
The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.RoFormerForMaskedLM
RoFormer Model with a language modeling head on top.
Kind: static class of models
roFormerForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of RoFormerForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.RoFormerForSequenceClassification
RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
roFormerForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of RoFormerForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.RoFormerForTokenClassification
RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
Kind: static class of models
roFormerForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of RoFormerForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.RoFormerForQuestionAnswering
RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute span start logits and span end logits).
Kind: static class of models
roFormerForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of RoFormerForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.ConvBertModel
The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.ConvBertForMaskedLM
ConvBERT Model with a language modeling head on top.
Kind: static class of models
convBertForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of ConvBertForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.ConvBertForSequenceClassification
ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
convBertForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of ConvBertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.ConvBertForTokenClassification
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
Kind: static class of models
convBertForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of ConvBertForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.ConvBertForQuestionAnswering
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute span start logits and span end logits).
Kind: static class of models
convBertForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of ConvBertForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.ElectraModel
The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the hidden size and embedding size are different.
Kind: static class of models
models.ElectraForMaskedLM
Electra model with a language modeling head on top.
Kind: static class of models
electraForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of ElectraForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.ElectraForSequenceClassification
ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
electraForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of ElectraForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.ElectraForTokenClassification
Electra model with a token classification head on top.
Kind: static class of models
electraForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of ElectraForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.ElectraForQuestionAnswering
ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute span start logits and span end logits).
Kind: static class of models
electraForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of ElectraForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.CamembertModel
The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.CamembertForMaskedLM
CamemBERT Model with a language modeling head on top.
Kind: static class of models
camembertForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of CamembertForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.CamembertForSequenceClassification
CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
Kind: static class of models
camembertForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of CamembertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.CamembertForTokenClassification
CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
Kind: static class of models
camembertForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of CamembertForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.CamembertForQuestionAnswering
CamemBERT Model with a span classification head on top for extractive question-answering tasks
Kind: static class of models
camembertForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of CamembertForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DebertaModel
The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.DebertaForMaskedLM
DeBERTa Model with a language modeling head on top.
Kind: static class of models
debertaForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of DebertaForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DebertaForSequenceClassification
DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
debertaForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of DebertaForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DebertaForTokenClassification
DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
Kind: static class of models
debertaForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of DebertaForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DebertaForQuestionAnswering
DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute span start logits and span end logits).
Kind: static class of models
debertaForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of DebertaForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DebertaV2Model
The bare DeBERTa-V2 Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.DebertaV2ForMaskedLM
DeBERTa-V2 Model with a language modeling head on top.
Kind: static class of models
debertaV2ForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of DebertaV2ForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DebertaV2ForSequenceClassification
DeBERTa-V2 Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
debertaV2ForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of DebertaV2ForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DebertaV2ForTokenClassification
DeBERTa-V2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
Kind: static class of models
debertaV2ForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of DebertaV2ForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DebertaV2ForQuestionAnswering
DeBERTa-V2 Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute span start logits and span end logits).
Kind: static class of models
debertaV2ForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of DebertaV2ForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DistilBertForSequenceClassification
DistilBertForSequenceClassification is a class representing a DistilBERT model for sequence classification.
Kind: static class of models
distilBertForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of DistilBertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DistilBertForTokenClassification
DistilBertForTokenClassification is a class representing a DistilBERT model for token classification.
Kind: static class of models
distilBertForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of DistilBertForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DistilBertForQuestionAnswering
DistilBertForQuestionAnswering is a class representing a DistilBERT model for question answering.
Kind: static class of models
distilBertForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of DistilBertForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.DistilBertForMaskedLM
DistilBertForMaskedLM is a class representing a DistilBERT model for masked language modeling.
Kind: static class of models
distilBertForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of DistilBertForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.EsmModel
The bare ESM Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.EsmForMaskedLM
ESM Model with a language modeling head on top.
Kind: static class of models
esmForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of EsmForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.EsmForSequenceClassification
ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
esmForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of EsmForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.EsmForTokenClassification
ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
Kind: static class of models
esmForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of EsmForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.MobileBertForMaskedLM
MobileBertForMaskedLM is a class representing a MobileBERT model for masked language modeling.
Kind: static class of models
mobileBertForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of MobileBertForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.MobileBertForSequenceClassification
MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
mobileBertForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of MobileBertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.MobileBertForQuestionAnswering
MobileBert Model with a span classification head on top for extractive question-answering tasks
Kind: static class of models
mobileBertForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of MobileBertForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.MPNetModel
The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.MPNetForMaskedLM
MPNetForMaskedLM is a class representing an MPNet model for masked language modeling.
Kind: static class of models
mpNetForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of MPNetForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.MPNetForSequenceClassification
MPNetForSequenceClassification is a class representing an MPNet model for sequence classification.
Kind: static class of models
mpNetForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of MPNetForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.MPNetForTokenClassification
MPNetForTokenClassification is a class representing an MPNet model for token classification.
Kind: static class of models
mpNetForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of MPNetForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.MPNetForQuestionAnswering
MPNetForQuestionAnswering is a class representing an MPNet model for question answering.
Kind: static class of models
mpNetForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of MPNetForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.T5ForConditionalGeneration
T5ForConditionalGeneration is a class representing a T5 model for conditional generation.
Kind: static class of models
models.LongT5PreTrainedModel
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
models.LongT5Model
The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.LongT5ForConditionalGeneration
LONGT5 Model with a language modeling head on top.
Kind: static class of models
models.MT5ForConditionalGeneration
A class representing a conditional sequence-to-sequence model based on the MT5 architecture.
Kind: static class of models
models.BartModel
The bare BART Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.BartForConditionalGeneration
The BART Model with a language modeling head. Can be used for summarization.
Kind: static class of models
models.BartForSequenceClassification
Bart model with a sequence classification head on top (a linear layer on top of the pooled output)
Kind: static class of models
bartForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of BartForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.MBartModel
The bare MBART Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.MBartForConditionalGeneration
The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.
Kind: static class of models
models.MBartForSequenceClassification
MBart model with a sequence classification head on top (a linear layer on top of the pooled output).
Kind: static class of models
mBartForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of MBartForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.BlenderbotModel
The bare Blenderbot Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.BlenderbotForConditionalGeneration
The Blenderbot Model with a language modeling head. Can be used for summarization.
Kind: static class of models
models.BlenderbotSmallModel
The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.BlenderbotSmallForConditionalGeneration
The BlenderbotSmall Model with a language modeling head. Can be used for summarization.
Kind: static class of models
models.RobertaForMaskedLM
RobertaForMaskedLM class for performing masked language modeling on Roberta models.
Kind: static class of models
robertaForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of RobertaForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.RobertaForSequenceClassification
RobertaForSequenceClassification class for performing sequence classification on Roberta models.
Kind: static class of models
robertaForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of RobertaForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.RobertaForTokenClassification
RobertaForTokenClassification class for performing token classification on Roberta models.
Kind: static class of models
robertaForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of RobertaForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.RobertaForQuestionAnswering
RobertaForQuestionAnswering class for performing question answering on Roberta models.
Kind: static class of models
robertaForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of RobertaForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.XLMPreTrainedModel
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
models.XLMModel
The bare XLM Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.XLMWithLMHeadModel
The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
xlmWithLMHeadModel._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of XLMWithLMHeadModel
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.XLMForSequenceClassification
XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
Kind: static class of models
xlmForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of XLMForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.XLMForTokenClassification
XLM Model with a token classification head on top (a linear layer on top of the hidden-states output)
Kind: static class of models
xlmForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of XLMForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.XLMForQuestionAnswering
XLM Model with a span classification head on top for extractive question-answering tasks
Kind: static class of models
xlmForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of XLMForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.XLMRobertaForMaskedLM
XLMRobertaForMaskedLM class for performing masked language modeling on XLMRoberta models.
Kind: static class of models
xlmRobertaForMaskedLM._call(model_inputs) ⇒ Promise.<MaskedLMOutput>
Calls the model on new inputs.
Kind: instance method of XLMRobertaForMaskedLM
Returns: Promise.<MaskedLMOutput> - An object containing the model's output logits for masked language modeling.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.XLMRobertaForSequenceClassification
XLMRobertaForSequenceClassification class for performing sequence classification on XLMRoberta models.
Kind: static class of models
xlmRobertaForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of XLMRobertaForSequenceClassification
Returns: Promise.<SequenceClassifierOutput> - An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.XLMRobertaForTokenClassification
XLMRobertaForTokenClassification class for performing token classification on XLMRoberta models.
Kind: static class of models
xlmRobertaForTokenClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of XLMRobertaForTokenClassification
Returns: Promise.<TokenClassifierOutput> - An object containing the model's output logits for token classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.XLMRobertaForQuestionAnswering
XLMRobertaForQuestionAnswering class for performing question answering on XLMRoberta models.
Kind: static class of models
xlmRobertaForQuestionAnswering._call(model_inputs) ⇒ Promise.<QuestionAnsweringModelOutput>
Calls the model on new inputs.
Kind: instance method of XLMRobertaForQuestionAnswering
Returns: Promise.<QuestionAnsweringModelOutput> - An object containing the model's output logits for question answering.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.ASTModel
The bare AST Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.ASTForAudioClassification
Audio Spectrogram Transformer model with an audio classification head on top (a linear layer on top of the pooled output) e.g. for datasets like AudioSet, Speech Commands v2.
Kind: static class of models
models.WhisperModel
WhisperModel class for training Whisper models without a language model head.
Kind: static class of models
models.WhisperForConditionalGeneration
WhisperForConditionalGeneration class for generating conditional outputs from Whisper models.
Kind: static class of models
- .WhisperForConditionalGeneration
  - ._retrieve_init_tokens(generation_config)
  - .generate(options) ⇒ Promise.<(ModelOutput|Tensor)>
  - ._extract_token_timestamps(generate_outputs, alignment_heads, [num_frames], [time_precision]) ⇒ Tensor
whisperForConditionalGeneration._retrieve_init_tokens(generation_config)
Kind: instance method of WhisperForConditionalGeneration
Param | Type |
---|---|
generation_config | WhisperGenerationConfig |
whisperForConditionalGeneration.generate(options) ⇒ Promise.<(ModelOutput|Tensor)>
Transcribes or translates log-mel input features to a sequence of auto-regressively generated token ids.
Kind: instance method of WhisperForConditionalGeneration
Returns: Promise.<(ModelOutput|Tensor)>
- The output of the model, which can contain the generated token ids, attentions, and scores.
Param | Type |
---|---|
options | * |
whisperForConditionalGeneration._extract_token_timestamps(generate_outputs, alignment_heads, [num_frames], [time_precision]) ⇒ Tensor
Calculates token-level timestamps using the encoder-decoder cross-attentions and dynamic time-warping (DTW) to map each output token to a position in the input audio. If num_frames is specified, the encoder-decoder cross-attentions will be cropped before applying DTW.
Kind: instance method of WhisperForConditionalGeneration
Returns: Tensor
- A tensor containing the timestamps in seconds for each predicted token.
Param | Type | Default | Description |
---|---|---|---|
generate_outputs | Object | | Outputs generated by the model |
generate_outputs.cross_attentions | Array.<Array<Tensor>> | | The cross attentions output by the model |
generate_outputs.sequences | Tensor | | The sequences output by the model |
alignment_heads | Array.<Array<number>> | | Alignment heads of the model |
[num_frames] | number | | Number of frames in the input audio. |
[time_precision] | number | 0.02 | Precision of the timestamps in seconds |
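Example: Transcribe audio with WhisperForConditionalGeneration. This is a minimal sketch added for illustration; the checkpoint name is an assumption, and the exact generate call may differ between library versions.
import { AutoProcessor, AutoTokenizer, WhisperForConditionalGeneration, read_audio } from '@huggingface/transformers';
// Load processor, tokenizer, and model (checkpoint name is an assumption)
const model_id = 'Xenova/whisper-tiny.en';
const processor = await AutoProcessor.from_pretrained(model_id);
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const model = await WhisperForConditionalGeneration.from_pretrained(model_id);
// Read and preprocess audio (Whisper consumes 16 kHz log-mel input features)
const audio = await read_audio('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);
const inputs = await processor(audio);
// Auto-regressively generate token ids, then decode them to text
const outputs = await model.generate({ ...inputs, max_new_tokens: 128 });
const transcription = tokenizer.batch_decode(outputs, { skip_special_tokens: true })[0];
console.log(transcription);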
models.MoonshineModel
MoonshineModel class for training Moonshine models without a language model head.
Kind: static class of models
models.VisionEncoderDecoderModel
Vision Encoder-Decoder model based on OpenAI's GPT architecture for image captioning and other vision tasks.
Kind: static class of models
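Example: Image captioning with VisionEncoderDecoderModel. This is a minimal sketch added for illustration; the Xenova/vit-gpt2-image-captioning checkpoint is an assumption, and the exact generate call may differ between library versions.
import { AutoTokenizer, AutoProcessor, VisionEncoderDecoderModel, RawImage } from '@huggingface/transformers';
// Load processor, tokenizer, and model (checkpoint name is an assumption)
const model_id = 'Xenova/vit-gpt2-image-captioning';
const processor = await AutoProcessor.from_pretrained(model_id);
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const model = await VisionEncoderDecoderModel.from_pretrained(model_id);
// Read image and compute pixel values
const image = await RawImage.read('http://images.cocodataset.org/val2017/000000039769.jpg');
const image_inputs = await processor(image);
// Generate and decode the caption
const outputs = await model.generate({ ...image_inputs, max_new_tokens: 32 });
console.log(tokenizer.batch_decode(outputs, { skip_special_tokens: true })[0]);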
models.LlavaForConditionalGeneration
The LLaVA model, which consists of a vision backbone and a language model.
Kind: static class of models
models.Idefics3ForConditionalGeneration
The Idefics3 model, which consists of a vision backbone and a language model.
Kind: static class of models
models.CLIPModel
CLIP Text and Vision Model with projection layers on top
Example: Perform zero-shot image classification with a CLIPModel.
import { AutoTokenizer, AutoProcessor, CLIPModel, RawImage } from '@huggingface/transformers';
// Load tokenizer, processor, and model
let tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16');
let processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');
let model = await CLIPModel.from_pretrained('Xenova/clip-vit-base-patch16');
// Run tokenization
let texts = ['a photo of a car', 'a photo of a football match']
let text_inputs = tokenizer(texts, { padding: true, truncation: true });
// Read image and run processor
let image = await RawImage.read('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
let image_inputs = await processor(image);
// Run model with both text and pixel inputs
let output = await model({ ...text_inputs, ...image_inputs });
// {
// logits_per_image: Tensor {
// dims: [ 1, 2 ],
// data: Float32Array(2) [ 18.579734802246094, 24.31830596923828 ],
// },
// logits_per_text: Tensor {
// dims: [ 2, 1 ],
// data: Float32Array(2) [ 18.579734802246094, 24.31830596923828 ],
// },
// text_embeds: Tensor {
// dims: [ 2, 512 ],
// data: Float32Array(1024) [ ... ],
// },
// image_embeds: Tensor {
// dims: [ 1, 512 ],
// data: Float32Array(512) [ ... ],
// }
// }
Kind: static class of models
models.CLIPTextModel
The text model from CLIP without any head or projection on top.
Kind: static class of models
CLIPTextModel.from_pretrained() : *
Kind: static method of CLIPTextModel
models.CLIPTextModelWithProjection
CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output)
Example: Compute text embeddings with CLIPTextModelWithProjection.
import { AutoTokenizer, CLIPTextModelWithProjection } from '@huggingface/transformers';
// Load tokenizer and text model
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16');
const text_model = await CLIPTextModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16');
// Run tokenization
let texts = ['a photo of a car', 'a photo of a football match'];
let text_inputs = tokenizer(texts, { padding: true, truncation: true });
// Compute embeddings
const { text_embeds } = await text_model(text_inputs);
// Tensor {
// dims: [ 2, 512 ],
// type: 'float32',
// data: Float32Array(1024) [ ... ],
// size: 1024
// }
Kind: static class of models
CLIPTextModelWithProjection.from_pretrained() : *
Kind: static method of CLIPTextModelWithProjection
models.CLIPVisionModel
The vision model from CLIP without any head or projection on top.
Kind: static class of models
CLIPVisionModel.from_pretrained() : *
Kind: static method of CLIPVisionModel
models.CLIPVisionModelWithProjection
CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output)
Example: Compute vision embeddings with CLIPVisionModelWithProjection.
import { AutoProcessor, CLIPVisionModelWithProjection, RawImage} from '@huggingface/transformers';
// Load processor and vision model
const processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');
const vision_model = await CLIPVisionModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16');
// Read image and run processor
let image = await RawImage.read('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
let image_inputs = await processor(image);
// Compute embeddings
const { image_embeds } = await vision_model(image_inputs);
// Tensor {
// dims: [ 1, 512 ],
// type: 'float32',
// data: Float32Array(512) [ ... ],
// size: 512
// }
Kind: static class of models
CLIPVisionModelWithProjection.from_pretrained() : *
Kind: static method of CLIPVisionModelWithProjection
models.SiglipModel
SigLIP Text and Vision Model with projection layers on top
Example: Perform zero-shot image classification with a SiglipModel.
import { AutoTokenizer, AutoProcessor, SiglipModel, RawImage } from '@huggingface/transformers';
// Load tokenizer, processor, and model
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-224');
const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-224');
const model = await SiglipModel.from_pretrained('Xenova/siglip-base-patch16-224');
// Run tokenization
const texts = ['a photo of 2 cats', 'a photo of 2 dogs'];
const text_inputs = tokenizer(texts, { padding: 'max_length', truncation: true });
// Read image and run processor
const image = await RawImage.read('http://images.cocodataset.org/val2017/000000039769.jpg');
const image_inputs = await processor(image);
// Run model with both text and pixel inputs
const output = await model({ ...text_inputs, ...image_inputs });
// {
// logits_per_image: Tensor {
// dims: [ 1, 2 ],
// data: Float32Array(2) [ -1.6019744873046875, -10.720091819763184 ],
// },
// logits_per_text: Tensor {
// dims: [ 2, 1 ],
// data: Float32Array(2) [ -1.6019744873046875, -10.720091819763184 ],
// },
// text_embeds: Tensor {
// dims: [ 2, 768 ],
// data: Float32Array(1536) [ ... ],
// },
// image_embeds: Tensor {
// dims: [ 1, 768 ],
// data: Float32Array(768) [ ... ],
// }
// }
Kind: static class of models
models.SiglipTextModel
The text model from SigLIP without any head or projection on top.
Example: Compute text embeddings with SiglipTextModel.
import { AutoTokenizer, SiglipTextModel } from '@huggingface/transformers';
// Load tokenizer and text model
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-224');
const text_model = await SiglipTextModel.from_pretrained('Xenova/siglip-base-patch16-224');
// Run tokenization
const texts = ['a photo of 2 cats', 'a photo of 2 dogs'];
const text_inputs = tokenizer(texts, { padding: 'max_length', truncation: true });
// Compute embeddings
const { pooler_output } = await text_model(text_inputs);
// Tensor {
// dims: [ 2, 768 ],
// type: 'float32',
// data: Float32Array(1536) [ ... ],
// size: 1536
// }
Kind: static class of models
SiglipTextModel.from_pretrained() : *
Kind: static method of SiglipTextModel
models.SiglipVisionModel
The vision model from SigLIP without any head or projection on top.
Example: Compute vision embeddings with SiglipVisionModel.
import { AutoProcessor, SiglipVisionModel, RawImage} from '@huggingface/transformers';
// Load processor and vision model
const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-224');
const vision_model = await SiglipVisionModel.from_pretrained('Xenova/siglip-base-patch16-224');
// Read image and run processor
const image = await RawImage.read('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
const image_inputs = await processor(image);
// Compute embeddings
const { pooler_output } = await vision_model(image_inputs);
// Tensor {
// dims: [ 1, 768 ],
// type: 'float32',
// data: Float32Array(768) [ ... ],
// size: 768
// }
Kind: static class of models
SiglipVisionModel.from_pretrained() : *
Kind: static method of SiglipVisionModel
models.CLIPSegForImageSegmentation
CLIPSeg model with a Transformer-based decoder on top for zero-shot and one-shot image segmentation.
Example: Perform zero-shot image segmentation with a CLIPSegForImageSegmentation model.
import { AutoTokenizer, AutoProcessor, CLIPSegForImageSegmentation, RawImage } from '@huggingface/transformers';
// Load tokenizer, processor, and model
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clipseg-rd64-refined');
const processor = await AutoProcessor.from_pretrained('Xenova/clipseg-rd64-refined');
const model = await CLIPSegForImageSegmentation.from_pretrained('Xenova/clipseg-rd64-refined');
// Run tokenization
const texts = ['a glass', 'something to fill', 'wood', 'a jar'];
const text_inputs = tokenizer(texts, { padding: true, truncation: true });
// Read image and run processor
const image = await RawImage.read('https://github.com/timojl/clipseg/blob/master/example_image.jpg?raw=true');
const image_inputs = await processor(image);
// Run model with both text and pixel inputs
const { logits } = await model({ ...text_inputs, ...image_inputs });
// logits: Tensor {
// dims: [4, 352, 352],
// type: 'float32',
// data: Float32Array(495616) [ ... ],
// size: 495616
// }
You can visualize the predictions as follows:
const preds = logits
.unsqueeze_(1)
.sigmoid_()
.mul_(255)
.round_()
.to('uint8');
for (let i = 0; i < preds.dims[0]; ++i) {
const img = RawImage.fromTensor(preds[i]);
img.save(`prediction_${i}.png`);
}
Kind: static class of models
models.GPT2LMHeadModel
GPT-2 language model head on top of the GPT-2 base model. This model is suitable for text generation tasks.
Kind: static class of models
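Example: Text generation with GPT2LMHeadModel. This is a minimal sketch added for illustration; the Xenova/gpt2 checkpoint and prompt are assumptions, and the exact generate call may differ between library versions.
import { AutoTokenizer, GPT2LMHeadModel } from '@huggingface/transformers';
// Load tokenizer and model (checkpoint name is an assumption)
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/gpt2');
const model = await GPT2LMHeadModel.from_pretrained('Xenova/gpt2');
// Tokenize a prompt and auto-regressively generate a continuation
const inputs = await tokenizer('Once upon a time,');
const outputs = await model.generate({ ...inputs, max_new_tokens: 30 });
console.log(tokenizer.batch_decode(outputs, { skip_special_tokens: true })[0]);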
models.JAISModel
The bare JAIS Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.JAISLMHeadModel
The JAIS Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
models.CodeGenModel
CodeGenModel is a class representing a code generation model without a language model head.
Kind: static class of models
models.CodeGenForCausalLM
CodeGenForCausalLM is a class that represents a code generation model based on the GPT-2 architecture. It extends the CodeGenPreTrainedModel class.
Kind: static class of models
models.LlamaPreTrainedModel
The bare LLaMA Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.LlamaModel
The bare LLaMA Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.CoherePreTrainedModel
The bare Cohere Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.GemmaPreTrainedModel
The bare Gemma Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.GemmaModel
The bare Gemma Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Gemma2PreTrainedModel
The bare Gemma2 Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Gemma2Model
The bare Gemma2 Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Qwen2PreTrainedModel
The bare Qwen2 Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Qwen2Model
The bare Qwen2 Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.PhiModel
The bare Phi Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Phi3Model
The bare Phi3 Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.BloomPreTrainedModel
The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.BloomModel
The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.BloomForCausalLM
The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
models.MptModel
The bare Mpt Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.MptForCausalLM
The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
models.OPTModel
The bare OPT Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.OPTForCausalLM
The OPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
Kind: static class of models
models.VitPoseForPoseEstimation
The VitPose model with a pose estimation head on top.
Kind: static class of models
models.VitMatteForImageMatting
ViTMatte framework leveraging any vision backbone to perform image matting.
Example: Perform image matting with a VitMatteForImageMatting model.
import { AutoProcessor, VitMatteForImageMatting, RawImage } from '@huggingface/transformers';
// Load processor and model
const processor = await AutoProcessor.from_pretrained('Xenova/vitmatte-small-distinctions-646');
const model = await VitMatteForImageMatting.from_pretrained('Xenova/vitmatte-small-distinctions-646');
// Load image and trimap
const image = await RawImage.fromURL('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/vitmatte_image.png');
const trimap = await RawImage.fromURL('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/vitmatte_trimap.png');
// Prepare image + trimap for the model
const inputs = await processor(image, trimap);
// Predict alpha matte
const { alphas } = await model(inputs);
// Tensor {
// dims: [ 1, 1, 640, 960 ],
// type: 'float32',
// size: 614400,
// data: Float32Array(614400) [ 0.9894027709960938, 0.9970508813858032, ... ]
// }
You can visualize the alpha matte as follows:
import { Tensor, cat, RawImage } from '@huggingface/transformers';
// Visualize predicted alpha matte
const imageTensor = image.toTensor();
// Convert float (0-1) alpha matte to uint8 (0-255)
const alphaChannel = alphas
.squeeze(0)
.mul_(255)
.clamp_(0, 255)
.round_()
.to('uint8');
// Concatenate original image with predicted alpha
const imageData = cat([imageTensor, alphaChannel], 0);
// Save output image
const outputImage = RawImage.fromTensor(imageData);
outputImage.save('output.png');
Kind: static class of models
vitMatteForImageMatting._call(model_inputs)
Kind: instance method of VitMatteForImageMatting
Param | Type |
---|---|
model_inputs | any |
models.DetrObjectDetectionOutput
Kind: static class of models
new DetrObjectDetectionOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Classification logits (including no-object) for all queries. |
output.pred_boxes | Tensor | Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). |
models.DetrSegmentationOutput
Kind: static class of models
new DetrSegmentationOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | The output logits of the model. |
output.pred_boxes | Tensor | Predicted boxes. |
output.pred_masks | Tensor | Predicted masks. |
models.RTDetrObjectDetectionOutput
Kind: static class of models
new RTDetrObjectDetectionOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Classification logits (including no-object) for all queries. |
output.pred_boxes | Tensor | Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). |
models.TableTransformerModel
The bare Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.TableTransformerForObjectDetection
Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection.
Kind: static class of models
tableTransformerForObjectDetection._call(model_inputs)
Kind: instance method of TableTransformerForObjectDetection
Param | Type |
---|---|
model_inputs | any |
models.ResNetPreTrainedModel
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
models.ResNetModel
The bare ResNet model outputting raw features without any specific head on top.
Kind: static class of models
models.ResNetForImageClassification
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
Kind: static class of models
resNetForImageClassification._call(model_inputs)
Kind: instance method of ResNetForImageClassification
Param | Type |
---|---|
model_inputs | any |
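Example: Image classification with ResNetForImageClassification. This is a minimal sketch added for illustration; the Xenova/resnet-50 checkpoint is an assumption.
import { AutoProcessor, ResNetForImageClassification, RawImage } from '@huggingface/transformers';
// Load processor and model (checkpoint name is an assumption)
const processor = await AutoProcessor.from_pretrained('Xenova/resnet-50');
const model = await ResNetForImageClassification.from_pretrained('Xenova/resnet-50');
// Read image and run processor
const image = await RawImage.read('http://images.cocodataset.org/val2017/000000039769.jpg');
const inputs = await processor(image);
// Run model and pick the highest-scoring class from the logits
const { logits } = await model(inputs);
const scores = logits.data;
let best = 0;
for (let i = 1; i < scores.length; ++i) if (scores[i] > scores[best]) best = i;
console.log(model.config.id2label[best]);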
models.Swin2SRModel
The bare Swin2SR Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Swin2SRForImageSuperResolution
Swin2SR Model transformer with an upsampler head on top for image super resolution and restoration.
Example: Super-resolution w/ Xenova/swin2SR-classical-sr-x2-64.
import { AutoProcessor, Swin2SRForImageSuperResolution, RawImage } from '@huggingface/transformers';
// Load processor and model
const model_id = 'Xenova/swin2SR-classical-sr-x2-64';
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await Swin2SRForImageSuperResolution.from_pretrained(model_id);
// Prepare model inputs
const url = 'https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/butterfly.jpg';
const image = await RawImage.fromURL(url);
const inputs = await processor(image);
// Run model
const outputs = await model(inputs);
// Convert Tensor to RawImage
const output = outputs.reconstruction.squeeze().clamp_(0, 1).mul_(255).round_().to('uint8');
const outputImage = RawImage.fromTensor(output);
// RawImage {
// data: Uint8Array(786432) [ 41, 31, 24, ... ],
// width: 512,
// height: 512,
// channels: 3
// }
Kind: static class of models
models.DPTModel
The bare DPT Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.DPTForDepthEstimation
DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.
Example: Depth estimation w/ Xenova/dpt-hybrid-midas.
import { DPTForDepthEstimation, AutoProcessor, RawImage, interpolate, max } from '@huggingface/transformers';
// Load model and processor
const model_id = 'Xenova/dpt-hybrid-midas';
const model = await DPTForDepthEstimation.from_pretrained(model_id);
const processor = await AutoProcessor.from_pretrained(model_id);
// Load image from URL
const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
const image = await RawImage.fromURL(url);
// Prepare image for the model
const inputs = await processor(image);
// Run model
const { predicted_depth } = await model(inputs);
// Interpolate to original size
const prediction = interpolate(predicted_depth, image.size.reverse(), 'bilinear', false);
// Visualize the prediction
const formatted = prediction.mul_(255 / max(prediction.data)[0]).to('uint8');
const depth = RawImage.fromTensor(formatted);
// RawImage {
// data: Uint8Array(307200) [ 85, 85, 84, ... ],
// width: 640,
// height: 480,
// channels: 1
// }
Kind: static class of models
models.DepthAnythingForDepthEstimation
Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.
Kind: static class of models
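Example: Depth estimation with DepthAnythingForDepthEstimation, mirroring the DPT example above. This is a sketch added for illustration; the onnx-community/depth-anything-v2-small checkpoint is an assumption.
import { DepthAnythingForDepthEstimation, AutoProcessor, RawImage, interpolate, max } from '@huggingface/transformers';
// Load model and processor (checkpoint name is an assumption)
const model_id = 'onnx-community/depth-anything-v2-small';
const model = await DepthAnythingForDepthEstimation.from_pretrained(model_id);
const processor = await AutoProcessor.from_pretrained(model_id);
// Load image from URL and prepare it for the model
const image = await RawImage.fromURL('http://images.cocodataset.org/val2017/000000039769.jpg');
const inputs = await processor(image);
// Run model
const { predicted_depth } = await model(inputs);
// Interpolate to original size and visualize, exactly as in the DPT example above
const prediction = interpolate(predicted_depth, image.size.reverse(), 'bilinear', false);
const formatted = prediction.mul_(255 / max(prediction.data)[0]).to('uint8');
const depth = RawImage.fromTensor(formatted);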
models.GLPNModel
The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.GLPNForDepthEstimation
GLPN Model transformer with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.
Example: Depth estimation w/ Xenova/glpn-kitti.
import { GLPNForDepthEstimation, AutoProcessor, RawImage, interpolate, max } from '@huggingface/transformers';
// Load model and processor
const model_id = 'Xenova/glpn-kitti';
const model = await GLPNForDepthEstimation.from_pretrained(model_id);
const processor = await AutoProcessor.from_pretrained(model_id);
// Load image from URL
const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
const image = await RawImage.fromURL(url);
// Prepare image for the model
const inputs = await processor(image);
// Run model
const { predicted_depth } = await model(inputs);
// Interpolate to original size
const prediction = interpolate(predicted_depth, image.size.reverse(), 'bilinear', false);
// Visualize the prediction
const formatted = prediction.mul_(255 / max(prediction.data)[0]).to('uint8');
const depth = RawImage.fromTensor(formatted);
// RawImage {
// data: Uint8Array(307200) [ 207, 169, 154, ... ],
// width: 640,
// height: 480,
// channels: 1
// }
Kind: static class of models
models.DonutSwinModel
The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.
Example: Step-by-step Document Parsing.
import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@huggingface/transformers';
// Choose model to use
const model_id = 'Xenova/donut-base-finetuned-cord-v2';
// Prepare image inputs
const processor = await AutoProcessor.from_pretrained(model_id);
const url = 'https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/receipt.png';
const image = await RawImage.read(url);
const image_inputs = await processor(image);
// Prepare decoder inputs
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const task_prompt = '<s_cord-v2>';
const decoder_input_ids = tokenizer(task_prompt, {
add_special_tokens: false,
}).input_ids;
// Create the model
const model = await AutoModelForVision2Seq.from_pretrained(model_id);
// Run inference
const output = await model.generate(image_inputs.pixel_values, {
decoder_input_ids,
max_length: model.config.decoder.max_position_embeddings,
});
// Decode output
const decoded = tokenizer.batch_decode(output)[0];
// <s_cord-v2><s_menu><s_nm> CINNAMON SUGAR</s_nm><s_unitprice> 17,000</s_unitprice><s_cnt> 1 x</s_cnt><s_price> 17,000</s_price></s_menu><s_sub_total><s_subtotal_price> 17,000</s_subtotal_price></s_sub_total><s_total><s_total_price> 17,000</s_total_price><s_cashprice> 20,000</s_cashprice><s_changeprice> 3,000</s_changeprice></s_total></s>
Example: Step-by-step Document Visual Question Answering (DocVQA)
import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@huggingface/transformers';
// Choose model to use
const model_id = 'Xenova/donut-base-finetuned-docvqa';
// Prepare image inputs
const processor = await AutoProcessor.from_pretrained(model_id);
const url = 'https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/invoice.png';
const image = await RawImage.read(url);
const image_inputs = await processor(image);
// Prepare decoder inputs
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const question = 'What is the invoice number?';
const task_prompt = `<s_docvqa><s_question>${question}</s_question><s_answer>`;
const decoder_input_ids = tokenizer(task_prompt, {
add_special_tokens: false,
}).input_ids;
// Create the model
const model = await AutoModelForVision2Seq.from_pretrained(model_id);
// Run inference
const output = await model.generate(image_inputs.pixel_values, {
decoder_input_ids,
max_length: model.config.decoder.max_position_embeddings,
});
// Decode output
const decoded = tokenizer.batch_decode(output)[0];
// <s_docvqa><s_question> What is the invoice number?</s_question><s_answer> us-001</s_answer></s>
Kind: static class of models
models.ConvNextModel
The bare ConvNext model outputting raw features without any specific head on top.
Kind: static class of models
models.ConvNextForImageClassification
ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
Kind: static class of models
convNextForImageClassification._call(model_inputs)
Kind: instance method of ConvNextForImageClassification
Param | Type |
---|---|
model_inputs | any |
models.ConvNextV2Model
The bare ConvNextV2 model outputting raw features without any specific head on top.
Kind: static class of models
models.ConvNextV2ForImageClassification
ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
Kind: static class of models
convNextV2ForImageClassification._call(model_inputs)
Kind: instance method of ConvNextV2ForImageClassification
Param | Type |
---|---|
model_inputs | any |
models.Dinov2Model
The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Dinov2ForImageClassification
Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.
Kind: static class of models
dinov2ForImageClassification._call(model_inputs)
Kind: instance method of Dinov2ForImageClassification
Param | Type |
---|---|
model_inputs | any |
models.Dinov2WithRegistersModel
The bare Dinov2WithRegisters Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Dinov2WithRegistersForImageClassification
Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.
Kind: static class of models
dinov2WithRegistersForImageClassification._call(model_inputs)
Kind: instance method of Dinov2WithRegistersForImageClassification
Param | Type |
---|---|
model_inputs | any |
models.YolosObjectDetectionOutput
Kind: static class of models
new YolosObjectDetectionOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Classification logits (including no-object) for all queries. |
output.pred_boxes | Tensor | Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). |
models.SamModel
Segment Anything Model (SAM) for generating segmentation masks, given an input image and optional 2D location and bounding boxes.
Example: Perform mask generation w/ Xenova/sam-vit-base.
import { SamModel, AutoProcessor, RawImage } from '@huggingface/transformers';
const model = await SamModel.from_pretrained('Xenova/sam-vit-base');
const processor = await AutoProcessor.from_pretrained('Xenova/sam-vit-base');
const img_url = 'https://huggingface.co./ybelkada/segment-anything/resolve/main/assets/car.png';
const raw_image = await RawImage.read(img_url);
const input_points = [[[450, 600]]] // 2D localization of a window
const inputs = await processor(raw_image, { input_points });
const outputs = await model(inputs);
const masks = await processor.post_process_masks(outputs.pred_masks, inputs.original_sizes, inputs.reshaped_input_sizes);
// [
// Tensor {
// dims: [ 1, 3, 1764, 2646 ],
// type: 'bool',
// data: Uint8Array(14002632) [ ... ],
// size: 14002632
// }
// ]
const scores = outputs.iou_scores;
// Tensor {
// dims: [ 1, 1, 3 ],
// type: 'float32',
// data: Float32Array(3) [
// 0.8892380595207214,
// 0.9311248064041138,
// 0.983696699142456
// ],
// size: 3
// }
Kind: static class of models
- .SamModel
.get_image_embeddings(model_inputs)
⇒ Promise.<{image_embeddings: Tensor, image_positional_embeddings: Tensor}>
.forward(model_inputs)
⇒ Promise.<Object>
._call(model_inputs)
⇒ Promise.<SamImageSegmentationOutput>
samModel.get_image_embeddings(model_inputs) ⇒ Promise.<{image_embeddings: Tensor, image_positional_embeddings: Tensor}>
Compute image embeddings and positional image embeddings, given the pixel values of an image.
Kind: instance method of SamModel
Returns: Promise.<{image_embeddings: Tensor, image_positional_embeddings: Tensor}>
- The image embeddings and positional image embeddings.
Param | Type | Description |
---|---|---|
model_inputs | Object | Object containing the model inputs. |
model_inputs.pixel_values | Tensor | Pixel values obtained using a SamProcessor. |
samModel.forward(model_inputs) ⇒ Promise.<Object>
Kind: instance method of SamModel
Returns: Promise.<Object>
- The output of the model.
Param | Type | Description |
---|---|---|
model_inputs | SamModelInputs | Object containing the model inputs. |
samModel._call(model_inputs) ⇒ Promise.<SamImageSegmentationOutput>
Runs the model with the provided inputs.
Kind: instance method of SamModel
Returns: Promise.<SamImageSegmentationOutput>
- Object containing segmentation outputs
Param | Type | Description |
---|---|---|
model_inputs | Object | Model inputs |
models.SamImageSegmentationOutput
Base class for Segment-Anything model's output.
Kind: static class of models
new SamImageSegmentationOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.iou_scores | Tensor | The predicted IoU scores. |
output.pred_masks | Tensor | The predicted segmentation masks. |
models.Wav2Vec2Model
The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.
Example: Load and run a Wav2Vec2Model for feature extraction.
import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers';
// Read and preprocess audio
const processor = await AutoProcessor.from_pretrained('Xenova/mms-300m');
const audio = await read_audio('https://huggingface.co./datasets/Narsil/asr_dummy/resolve/main/mlk.flac', 16000);
const inputs = await processor(audio);
// Run model with inputs
const model = await AutoModel.from_pretrained('Xenova/mms-300m');
const output = await model(inputs);
// {
// last_hidden_state: Tensor {
// dims: [ 1, 1144, 1024 ],
// type: 'float32',
// data: Float32Array(1171456) [ ... ],
// size: 1171456
// }
// }
Kind: static class of models
models.Wav2Vec2ForAudioFrameClassification
Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization.
Kind: static class of models
wav2Vec2ForAudioFrameClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of Wav2Vec2ForAudioFrameClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model's output logits for audio frame classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.PyAnnoteModel
The bare PyAnnote Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.PyAnnoteForAudioFrameClassification
PyAnnote Model with a frame classification head on top for tasks like Speaker Diarization.
Example: Load and run a PyAnnoteForAudioFrameClassification for speaker diarization.
import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';
// Load model and processor
const model_id = 'onnx-community/pyannote-segmentation-3.0';
const model = await AutoModelForAudioFrameClassification.from_pretrained(model_id);
const processor = await AutoProcessor.from_pretrained(model_id);
// Read and preprocess audio
const url = 'https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/mlk.wav';
const audio = await read_audio(url, processor.feature_extractor.config.sampling_rate);
const inputs = await processor(audio);
// Run model with inputs
const { logits } = await model(inputs);
// {
// logits: Tensor {
// dims: [ 1, 767, 7 ], // [batch_size, num_frames, num_classes]
// type: 'float32',
// data: Float32Array(5369) [ ... ],
// size: 5369
// }
// }
const result = processor.post_process_speaker_diarization(logits, audio.length);
// [
// [
// { id: 0, start: 0, end: 1.0512535626298245, confidence: 0.8220156481664611 },
// { id: 2, start: 1.0512535626298245, end: 2.3398869619825127, confidence: 0.9008811707860472 },
// ...
// ]
// ]
// Display result
console.table(result[0], ['start', 'end', 'id', 'confidence']);
// ┌─────────┬────────────────────┬────────────────────┬────┬─────────────────────┐
// │ (index) │ start              │ end                │ id │ confidence          │
// ├─────────┼────────────────────┼────────────────────┼────┼─────────────────────┤
// │ 0       │ 0                  │ 1.0512535626298245 │ 0  │ 0.8220156481664611  │
// │ 1       │ 1.0512535626298245 │ 2.3398869619825127 │ 2  │ 0.9008811707860472  │
// │ 2       │ 2.3398869619825127 │ 3.5946089560890773 │ 0  │ 0.7521651315796233  │
// │ 3       │ 3.5946089560890773 │ 4.578039708226655  │ 2  │ 0.8491978128022479  │
// │ 4       │ 4.578039708226655  │ 4.594995410849717  │ 0  │ 0.2935352600416393  │
// │ 5       │ 4.594995410849717  │ 6.121008646925269  │ 3  │ 0.6788051309866024  │
// │ 6       │ 6.121008646925269  │ 6.256654267909762  │ 0  │ 0.37125512393851134 │
// │ 7       │ 6.256654267909762  │ 8.630452635138397  │ 2  │ 0.7467035186353542  │
// │ 8       │ 8.630452635138397  │ 10.088643060721703 │ 0  │ 0.7689364814666032  │
// │ 9       │ 10.088643060721703 │ 12.58113134631177  │ 2  │ 0.9123324509131324  │
// │ 10      │ 12.58113134631177  │ 13.005023911888312 │ 0  │ 0.4828358177572041  │
// └─────────┴────────────────────┴────────────────────┴────┴─────────────────────┘
Kind: static class of models
pyAnnoteForAudioFrameClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of PyAnnoteForAudioFrameClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model's output logits for audio frame classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.UniSpeechModel
The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.UniSpeechForCTC
UniSpeech Model with a language modeling head on top for Connectionist Temporal Classification (CTC).
Kind: static class of models
uniSpeechForCTC._call(model_inputs)
Kind: instance method of UniSpeechForCTC
Param | Type | Description |
---|---|---|
model_inputs | Object | |
model_inputs.input_values | Tensor | Float values of input raw speech waveform. |
model_inputs.attention_mask | Tensor | Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] |
models.UniSpeechForSequenceClassification
UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output).
Kind: static class of models
uniSpeechForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of UniSpeechForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.UniSpeechSatModel
The bare UniSpeechSat Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.UniSpeechSatForCTC
UniSpeechSat Model with a language modeling head on top for Connectionist Temporal Classification (CTC).
Kind: static class of models
uniSpeechSatForCTC._call(model_inputs)
Kind: instance method of UniSpeechSatForCTC
Param | Type | Description |
---|---|---|
model_inputs | Object | |
model_inputs.input_values | Tensor | Float values of input raw speech waveform. |
model_inputs.attention_mask | Tensor | Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] |
models.UniSpeechSatForSequenceClassification
UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output).
Kind: static class of models
uniSpeechSatForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of UniSpeechSatForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.UniSpeechSatForAudioFrameClassification
UniSpeechSat Model with a frame classification head on top for tasks like Speaker Diarization.
Kind: static class of models
uniSpeechSatForAudioFrameClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of UniSpeechSatForAudioFrameClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model's output logits for audio frame classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.Wav2Vec2BertModel
The bare Wav2Vec2Bert Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Wav2Vec2BertForCTC
Wav2Vec2Bert Model with a language modeling head on top for Connectionist Temporal Classification (CTC).
Kind: static class of models
wav2Vec2BertForCTC._call(model_inputs)
Kind: instance method of Wav2Vec2BertForCTC
Param | Type | Description |
---|---|---|
model_inputs | Object | |
model_inputs.input_features | Tensor | Float values of input mel-spectrogram. |
model_inputs.attention_mask | Tensor | Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] |
models.Wav2Vec2BertForSequenceClassification
Wav2Vec2Bert Model with a sequence classification head on top (a linear layer over the pooled output).
Kind: static class of models
wav2Vec2BertForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of Wav2Vec2BertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.HubertModel
The bare Hubert Model transformer outputting raw hidden-states without any specific head on top.
Example: Load and run a HubertModel for feature extraction.
import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers';
// Read and preprocess audio
const processor = await AutoProcessor.from_pretrained('Xenova/hubert-base-ls960');
const audio = await read_audio('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);
const inputs = await processor(audio);
// Load and run model with inputs
const model = await AutoModel.from_pretrained('Xenova/hubert-base-ls960');
const output = await model(inputs);
// {
// last_hidden_state: Tensor {
// dims: [ 1, 549, 768 ],
// type: 'float32',
// data: Float32Array(421632) [0.0682469978928566, 0.08104046434164047, -0.4975186586380005, ...],
// size: 421632
// }
// }
Kind: static class of models
models.HubertForCTC
Hubert Model with a language modeling head on top for Connectionist Temporal Classification (CTC).
Kind: static class of models
hubertForCTC._call(model_inputs)
Kind: instance method of HubertForCTC
Param | Type | Description |
---|---|---|
model_inputs | Object | |
model_inputs.input_values | Tensor | Float values of input raw speech waveform. |
model_inputs.attention_mask | Tensor | Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] |
models.HubertForSequenceClassification
Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.
Kind: static class of models
hubertForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of HubertForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.WavLMPreTrainedModel
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
models.WavLMModel
The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.
Example: Load and run a WavLMModel for feature extraction.
import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers';
// Read and preprocess audio
const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base');
const audio = await read_audio('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);
const inputs = await processor(audio);
// Run model with inputs
const model = await AutoModel.from_pretrained('Xenova/wavlm-base');
const output = await model(inputs);
// {
// last_hidden_state: Tensor {
// dims: [ 1, 549, 768 ],
// type: 'float32',
// data: Float32Array(421632) [-0.349443256855011, -0.39341306686401367, 0.022836603224277496, ...],
// size: 421632
// }
// }
Kind: static class of models
models.WavLMForCTC
WavLM Model with a language modeling head on top for Connectionist Temporal Classification (CTC).
Kind: static class of models
wavLMForCTC._call(model_inputs)
Kind: instance method of WavLMForCTC
Param | Type | Description |
---|---|---|
model_inputs | Object | |
model_inputs.input_values | Tensor | Float values of input raw speech waveform. |
model_inputs.attention_mask | Tensor | Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1] |
models.WavLMForSequenceClassification
WavLM Model with a sequence classification head on top (a linear layer over the pooled output).
Kind: static class of models
wavLMForSequenceClassification._call(model_inputs) ⇒ Promise.<SequenceClassifierOutput>
Calls the model on new inputs.
Kind: instance method of WavLMForSequenceClassification
Returns: Promise.<SequenceClassifierOutput>
- An object containing the model's output logits for sequence classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.WavLMForXVector
WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification.
Example: Extract speaker embeddings with WavLMForXVector.
import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers';
// Read and preprocess audio
const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base-plus-sv');
const url = 'https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';
const audio = await read_audio(url, 16000);
const inputs = await processor(audio);
// Run model with inputs
const model = await AutoModel.from_pretrained('Xenova/wavlm-base-plus-sv');
const outputs = await model(inputs);
// {
// logits: Tensor {
// dims: [ 1, 512 ],
// type: 'float32',
// data: Float32Array(512) [0.5847219228744507, ...],
// size: 512
// },
// embeddings: Tensor {
// dims: [ 1, 512 ],
// type: 'float32',
// data: Float32Array(512) [-0.09079201519489288, ...],
// size: 512
// }
// }
Kind: static class of models
wavLMForXVector._call(model_inputs) ⇒ Promise.<XVectorOutput>
Calls the model on new inputs.
Kind: instance method of WavLMForXVector
Returns: Promise.<XVectorOutput>
- An object containing the model's output logits and speaker embeddings.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.WavLMForAudioFrameClassification
WavLM Model with a frame classification head on top for tasks like Speaker Diarization.
Example: Perform speaker diarization with WavLMForAudioFrameClassification.
import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';
// Read and preprocess audio
const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base-plus-sd');
const url = 'https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';
const audio = await read_audio(url, 16000);
const inputs = await processor(audio);
// Run model with inputs
const model = await AutoModelForAudioFrameClassification.from_pretrained('Xenova/wavlm-base-plus-sd');
const { logits } = await model(inputs);
// {
// logits: Tensor {
// dims: [ 1, 549, 2 ], // [batch_size, num_frames, num_speakers]
// type: 'float32',
// data: Float32Array(1098) [-3.5301010608673096, ...],
// size: 1098
// }
// }
const labels = logits[0].sigmoid().tolist().map(
frames => frames.map(speaker => speaker > 0.5 ? 1 : 0)
);
console.log(labels); // labels is a one-hot array of shape (num_frames, num_speakers)
// [
// [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
// [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
// [0, 0], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1],
// ...
// ]
Kind: static class of models
wavLMForAudioFrameClassification._call(model_inputs) ⇒ Promise.<TokenClassifierOutput>
Calls the model on new inputs.
Kind: instance method of WavLMForAudioFrameClassification
Returns: Promise.<TokenClassifierOutput>
- An object containing the model's output logits for audio frame classification.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.SpeechT5PreTrainedModel
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
Kind: static class of models
models.SpeechT5Model
The bare SpeechT5 Encoder-Decoder Model outputting raw hidden-states without any specific pre- or post-nets.
Kind: static class of models
models.SpeechT5ForSpeechToText
SpeechT5 Model with a speech encoder and a text decoder.
Example: Generate speech from text with SpeechT5ForTextToSpeech and the SpeechT5HifiGan vocoder.
import { AutoTokenizer, AutoProcessor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, Tensor } from '@huggingface/transformers';
// Load the tokenizer and processor
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/speecht5_tts');
const processor = await AutoProcessor.from_pretrained('Xenova/speecht5_tts');
// Load the models
// NOTE: We use the full-precision versions as they are more accurate
const model = await SpeechT5ForTextToSpeech.from_pretrained('Xenova/speecht5_tts', { dtype: 'fp32' });
const vocoder = await SpeechT5HifiGan.from_pretrained('Xenova/speecht5_hifigan', { dtype: 'fp32' });
// Load speaker embeddings from URL
const speaker_embeddings_data = new Float32Array(
await (await fetch('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/speaker_embeddings.bin')).arrayBuffer()
);
const speaker_embeddings = new Tensor(
'float32',
speaker_embeddings_data,
[1, speaker_embeddings_data.length]
)
// Run tokenization
const { input_ids } = tokenizer('Hello, my dog is cute');
// Generate waveform
const { waveform } = await model.generate_speech(input_ids, speaker_embeddings, { vocoder });
console.log(waveform)
// Tensor {
// dims: [ 26112 ],
// type: 'float32',
// size: 26112,
// data: Float32Array(26112) [ -0.00043630177970044315, -0.00018082228780258447, ... ],
// }
Kind: static class of models
models.SpeechT5ForTextToSpeech
SpeechT5 Model with a text encoder and a speech decoder.
Kind: static class of models
speechT5ForTextToSpeech.generate_speech(input_values, speaker_embeddings, options) ⇒ Promise.<SpeechOutput>
Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a speech waveform using a vocoder.
Kind: instance method of SpeechT5ForTextToSpeech
Returns: Promise.<SpeechOutput>
- A promise which resolves to an object containing the spectrogram, waveform, and cross-attention tensors.
Param | Type | Default | Description |
---|---|---|---|
input_values | Tensor | | Indices of input sequence tokens in the vocabulary. |
speaker_embeddings | Tensor | | Tensor containing the speaker embeddings. |
options | Object | | Optional parameters for generating speech. |
[options.threshold] | number | 0.5 | The generated sequence ends when the predicted stop token probability exceeds this value. |
[options.minlenratio] | number | 0.0 | Used to calculate the minimum required length for the output sequence. |
[options.maxlenratio] | number | 20.0 | Used to calculate the maximum allowed length for the output sequence. |
[options.vocoder] | Object | | The vocoder that converts the mel spectrogram into a speech waveform. If `null`, the output is the mel spectrogram. |
[options.output_cross_attentions] | boolean | false | Whether or not to return the attentions tensors of the decoder's cross-attention layers. |
models.SpeechT5HifiGan
HiFi-GAN vocoder.
See SpeechT5ForSpeechToText for example usage.
Kind: static class of models
models.TrOCRForCausalLM
The TrOCR Decoder with a language modeling head.
Kind: static class of models
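Example: Optical character recognition with a TrOCR checkpoint. This is a sketch added for illustration; TrOCRForCausalLM serves as the decoder of a composite vision-encoder-decoder model, so it is loaded here through AutoModelForVision2Seq. The checkpoint name and image URL are assumptions.
import { AutoTokenizer, AutoProcessor, AutoModelForVision2Seq, RawImage } from '@huggingface/transformers';
// Load processor, tokenizer, and composite model (checkpoint name is an assumption)
const model_id = 'Xenova/trocr-small-handwritten';
const processor = await AutoProcessor.from_pretrained(model_id);
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const model = await AutoModelForVision2Seq.from_pretrained(model_id);
// Read an image of handwritten text and compute pixel values
const image = await RawImage.read('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/handwriting.jpg');
const image_inputs = await processor(image);
// Generate and decode the recognized text
const outputs = await model.generate({ ...image_inputs, max_new_tokens: 64 });
console.log(tokenizer.batch_decode(outputs, { skip_special_tokens: true })[0]);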
models.MistralPreTrainedModel
The bare Mistral Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.Starcoder2PreTrainedModel
The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.FalconPreTrainedModel
The bare Falcon Model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.ClapTextModelWithProjection
CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output).
Example: Compute text embeddings with ClapTextModelWithProjection.
import { AutoTokenizer, ClapTextModelWithProjection } from '@huggingface/transformers';
// Load tokenizer and text model
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clap-htsat-unfused');
const text_model = await ClapTextModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused');
// Run tokenization
const texts = ['a sound of a cat', 'a sound of a dog'];
const text_inputs = tokenizer(texts, { padding: true, truncation: true });
// Compute embeddings
const { text_embeds } = await text_model(text_inputs);
// Tensor {
// dims: [ 2, 512 ],
// type: 'float32',
// data: Float32Array(1024) [ ... ],
// size: 1024
// }
Kind: static class of models
ClapTextModelWithProjection.from_pretrained() : *
Kind: static method of ClapTextModelWithProjection
models.ClapAudioModelWithProjection
CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output).
Example: Compute audio embeddings with ClapAudioModelWithProjection.
import { AutoProcessor, ClapAudioModelWithProjection, read_audio } from '@huggingface/transformers';
// Load processor and audio model
const processor = await AutoProcessor.from_pretrained('Xenova/clap-htsat-unfused');
const audio_model = await ClapAudioModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused');
// Read audio and run processor
const audio = await read_audio('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/cat_meow.wav');
const audio_inputs = await processor(audio);
// Compute embeddings
const { audio_embeds } = await audio_model(audio_inputs);
// Tensor {
// dims: [ 1, 512 ],
// type: 'float32',
// data: Float32Array(512) [ ... ],
// size: 512
// }
Kind: static class of models
ClapAudioModelWithProjection.from_pretrained() : *
Kind: static method of ClapAudioModelWithProjection
models.VitsModel
The complete VITS model, for text-to-speech synthesis.
Example: Generate speech from text with VitsModel.
import { AutoTokenizer, VitsModel } from '@huggingface/transformers';
// Load the tokenizer and model
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/mms-tts-eng');
const model = await VitsModel.from_pretrained('Xenova/mms-tts-eng');
// Run tokenization
const inputs = tokenizer('I love transformers');
// Generate waveform
const { waveform } = await model(inputs);
// Tensor {
// dims: [ 1, 35328 ],
// type: 'float32',
// data: Float32Array(35328) [ ... ],
// size: 35328,
// }
Kind: static class of models
vitsModel._call(model_inputs) ⇒ Promise.<VitsModelOutput>
Calls the model on new inputs.
Kind: instance method of VitsModel
Returns: Promise.<VitsModelOutput>
- The outputs for the VITS model.
Param | Type | Description |
---|---|---|
model_inputs | Object | The inputs to the model. |
models.SegformerModel
The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.SegformerForImageClassification
SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet.
Kind: static class of models
models.SegformerForSemanticSegmentation
SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes.
Kind: static class of models
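Example: Semantic segmentation with SegformerForSemanticSegmentation. This is a minimal sketch added for illustration; the checkpoint name is an assumption.
import { AutoProcessor, SegformerForSemanticSegmentation, RawImage } from '@huggingface/transformers';
// Load processor and model (checkpoint name is an assumption)
const model_id = 'Xenova/segformer-b0-finetuned-ade-512-512';
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await SegformerForSemanticSegmentation.from_pretrained(model_id);
// Read image and run processor
const image = await RawImage.read('http://images.cocodataset.org/val2017/000000039769.jpg');
const inputs = await processor(image);
// Run model; logits have dims [batch_size, num_labels, height / 4, width / 4]
const { logits } = await model(inputs);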
models.StableLmModel
The bare StableLm Model transformer outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.StableLmForCausalLM
StableLm Model with a language modeling head on top for Causal Language Modeling (with past).
Kind: static class of models
models.EfficientNetModel
The bare EfficientNet model outputting raw features without any specific head on top.
Kind: static class of models
models.EfficientNetForImageClassification
EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features).
Kind: static class of models
efficientNetForImageClassification._call(model_inputs)
Kind: instance method of EfficientNetForImageClassification
Param | Type |
---|---|
model_inputs | any |
models.MusicgenModel
The bare Musicgen decoder model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.MusicgenForCausalLM
The MusicGen decoder model with a language modelling head on top.
Kind: static class of models
models.MusicgenForConditionalGeneration
The composite MusicGen model with a text encoder, audio encoder and Musicgen decoder, for music generation tasks with one or both of text and audio prompts.
Example: Generate music from text with Xenova/musicgen-small.
import { AutoTokenizer, MusicgenForConditionalGeneration } from '@huggingface/transformers';
// Load tokenizer and model
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/musicgen-small');
const model = await MusicgenForConditionalGeneration.from_pretrained(
'Xenova/musicgen-small', { dtype: 'fp32' }
);
// Prepare text input
const prompt = '80s pop track with bassy drums and synth';
const inputs = tokenizer(prompt);
// Generate audio
const audio_values = await model.generate({
...inputs,
max_new_tokens: 512,
do_sample: true,
guidance_scale: 3,
});
// (Optional) Write the output to a WAV file
import wavefile from 'wavefile';
import fs from 'fs';
const wav = new wavefile.WaveFile();
wav.fromScratch(1, model.config.audio_encoder.sampling_rate, '32f', audio_values.data);
fs.writeFileSync('musicgen_out.wav', wav.toBuffer());
Kind: static class of models
- .MusicgenForConditionalGeneration
._apply_and_filter_by_delay_pattern_mask(outputs)
⇒ Tensor
.generate(options)
⇒ Promise.<(ModelOutput|Tensor)>
musicgenForConditionalGeneration._apply_and_filter_by_delay_pattern_mask(outputs) ⇒ Tensor
Apply the pattern mask to the final ids, then revert the pattern delay mask by filtering the pad token id in a single step.
Kind: instance method of MusicgenForConditionalGeneration
Returns: Tensor - The filtered output tensor.
Param | Type | Description |
---|---|---|
outputs | Tensor | The output tensor from the model. |
musicgenForConditionalGeneration.generate(options) ⇒ Promise.<(ModelOutput|Tensor)>
Generates sequences of token ids for models with a language modeling head.
Kind: instance method of MusicgenForConditionalGeneration
Returns: Promise.<(ModelOutput|Tensor)> - The output of the model, which can contain the generated token ids, attentions, and scores.
Param | Type |
---|---|
options | * |
models.MobileNetV1Model
The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.MobileNetV1ForImageClassification
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
Kind: static class of models
mobileNetV1ForImageClassification._call(model_inputs)
Kind: instance method of MobileNetV1ForImageClassification
Param | Type |
---|---|
model_inputs | any |
models.MobileNetV2Model
The bare MobileNetV2 model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.MobileNetV2ForImageClassification
MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
Kind: static class of models
mobileNetV2ForImageClassification._call(model_inputs)
Kind: instance method of MobileNetV2ForImageClassification
Param | Type |
---|---|
model_inputs | any |
models.MobileNetV3Model
The bare MobileNetV3 model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.MobileNetV3ForImageClassification
MobileNetV3 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
Kind: static class of models
mobileNetV3ForImageClassification._call(model_inputs)
Kind: instance method of MobileNetV3ForImageClassification
Param | Type |
---|---|
model_inputs | any |
models.MobileNetV4Model
The bare MobileNetV4 model outputting raw hidden-states without any specific head on top.
Kind: static class of models
models.MobileNetV4ForImageClassification
MobileNetV4 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
Kind: static class of models
mobileNetV4ForImageClassification._call(model_inputs)
Kind: instance method of MobileNetV4ForImageClassification
Param | Type |
---|---|
model_inputs | any |
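All four MobileNet versions above share the same interface, so one sketch covers them. The checkpoint id 'Xenova/mobilenet_v2_1.0_224' and the image URL are assumptions; substitute the class and checkpoint for the version you use.
import { AutoProcessor, MobileNetV2ForImageClassification, RawImage } from '@huggingface/transformers';
const processor = await AutoProcessor.from_pretrained('Xenova/mobilenet_v2_1.0_224');
const model = await MobileNetV2ForImageClassification.from_pretrained('Xenova/mobilenet_v2_1.0_224');
// Preprocess an image, run the model, and map the top logit to a label
const image = await RawImage.read('https://huggingface.co./datasets/Xenova/transformers.js-docs/resolve/main/cats.jpg');
const { logits } = await model(await processor(image));
console.log(model.config.id2label[logits.data.indexOf(Math.max(...logits.data))]);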
models.DecisionTransformerModel
The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL setting. Refer to the paper for more details: https://arxiv.org/abs/2106.01345
Kind: static class of models
models.MgpstrForSceneTextRecognition
MGP-STR Model transformer with three classification heads on top (three A^3 modules and three linear layers on top of the transformer encoder output) for scene text recognition (STR).
Kind: static class of models
mgpstrForSceneTextRecognition._call(model_inputs)
Kind: instance method of MgpstrForSceneTextRecognition
Param | Type |
---|---|
model_inputs | any |
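Example: Recognize scene text with MgpstrForSceneTextRecognition.
A minimal sketch; the checkpoint id 'onnx-community/mgp-str-base', the image URL, and the MgpstrProcessor.batch_decode helper are assumptions.
import { MgpstrForSceneTextRecognition, MgpstrProcessor, RawImage } from '@huggingface/transformers';
const model = await MgpstrForSceneTextRecognition.from_pretrained('onnx-community/mgp-str-base');
const processor = await MgpstrProcessor.from_pretrained('onnx-community/mgp-str-base');
// Read a cropped word image (placeholder URL) and run the three recognition heads
const image = await RawImage.read('https://i.postimg.cc/ZKwLg2Gw/367-14.png');
const outputs = await model(await processor(image));
// batch_decode fuses the character, BPE and wordpiece heads into final strings (assumption)
const { generated_text } = processor.batch_decode(outputs.logits);
console.log(generated_text);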
models.PatchTSTModel
The bare PatchTST Model outputting raw hidden-states without any specific head.
Kind: static class of models
models.PatchTSTForPrediction
The PatchTST for prediction model.
Kind: static class of models
models.PatchTSMixerModel
The bare PatchTSMixer Model outputting raw hidden-states without any specific head.
Kind: static class of models
models.PatchTSMixerForPrediction
The PatchTSMixer for prediction model.
Kind: static class of models
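Both prediction models take a past_values tensor of shape [batch_size, sequence_length, num_channels] and return prediction_outputs; PatchTSMixerForPrediction is used the same way as the PatchTST sketch below. The checkpoint id 'onnx-community/granite-timeseries-patchtst' is an assumption.
import { PatchTSTForPrediction, Tensor } from '@huggingface/transformers';
const model = await PatchTSTForPrediction.from_pretrained('onnx-community/granite-timeseries-patchtst', { dtype: 'fp32' });
// Synthetic batch: 64 series, 512 time steps, 7 channels
const dims = [64, 512, 7];
const size = dims.reduce((a, b) => a * b, 1);
const past_values = new Tensor('float32', Float32Array.from({ length: size }, (_, i) => i / size), dims);
const { prediction_outputs } = await model({ past_values });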
models.PretrainedMixin
Base class of all AutoModels. Contains the from_pretrained function which is used to instantiate pretrained models.
Kind: static class of models
- .PretrainedMixin
- instance
- static
pretrainedMixin.MODEL_CLASS_MAPPINGS : *
Mapping from model type to model class.
Kind: instance property of PretrainedMixin
pretrainedMixin.BASE_IF_FAIL
Whether to attempt to instantiate the base class (PretrainedModel) if the model type is not found in the mapping.
Kind: instance property of PretrainedMixin
PretrainedMixin.from_pretrained() : *
Kind: static method of PretrainedMixin
models.AutoModel
Helper class which is used to instantiate pretrained models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModel()
Example
let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased');
autoModel.MODEL_CLASS_MAPPINGS : *
Kind: instance property of AutoModel
models.AutoModelForSequenceClassification
Helper class which is used to instantiate pretrained sequence classification models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForSequenceClassification()
Example
let model = await AutoModelForSequenceClassification.from_pretrained('Xenova/distilbert-base-uncased-finetuned-sst-2-english');
models.AutoModelForTokenClassification
Helper class which is used to instantiate pretrained token classification models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForTokenClassification()
Example
let model = await AutoModelForTokenClassification.from_pretrained('Xenova/distilbert-base-multilingual-cased-ner-hrl');
models.AutoModelForSeq2SeqLM
Helper class which is used to instantiate pretrained sequence-to-sequence models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForSeq2SeqLM()
Example
let model = await AutoModelForSeq2SeqLM.from_pretrained('Xenova/t5-small');
models.AutoModelForSpeechSeq2Seq
Helper class which is used to instantiate pretrained sequence-to-sequence speech-to-text models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForSpeechSeq2Seq()
Example
let model = await AutoModelForSpeechSeq2Seq.from_pretrained('openai/whisper-tiny.en');
models.AutoModelForTextToSpectrogram
Helper class which is used to instantiate pretrained sequence-to-sequence text-to-spectrogram models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForTextToSpectrogram()
Example
let model = await AutoModelForTextToSpectrogram.from_pretrained('microsoft/speecht5_tts');
models.AutoModelForTextToWaveform
Helper class which is used to instantiate pretrained text-to-waveform models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForTextToWaveform()
Example
let model = await AutoModelForTextToWaveform.from_pretrained('facebook/mms-tts-eng');
models.AutoModelForCausalLM
Helper class which is used to instantiate pretrained causal language models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForCausalLM()
Example
let model = await AutoModelForCausalLM.from_pretrained('Xenova/gpt2');
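A short generation sketch, following the same object-style generate call as the MusicGen example above; the prompt and max_new_tokens value are illustrative.
import { AutoTokenizer, AutoModelForCausalLM } from '@huggingface/transformers';
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/gpt2');
const model = await AutoModelForCausalLM.from_pretrained('Xenova/gpt2');
// Tokenize a prompt and sample a continuation
const inputs = await tokenizer('Once upon a time,');
const outputs = await model.generate({ ...inputs, max_new_tokens: 20 });
console.log(tokenizer.decode(outputs[0], { skip_special_tokens: true }));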
models.AutoModelForMaskedLM
Helper class which is used to instantiate pretrained masked language models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForMaskedLM()
Example
let model = await AutoModelForMaskedLM.from_pretrained('Xenova/bert-base-uncased');
models.AutoModelForQuestionAnswering
Helper class which is used to instantiate pretrained question answering models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForQuestionAnswering()
Example
let model = await AutoModelForQuestionAnswering.from_pretrained('Xenova/distilbert-base-cased-distilled-squad');
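The returned start_logits and end_logits (see QuestionAnsweringModelOutput below) score each token as a possible answer-span boundary. A minimal sketch that extracts the answer with a greedy argmax; a production pipeline would also mask out question tokens and enforce start <= end.
import { AutoTokenizer, AutoModelForQuestionAnswering } from '@huggingface/transformers';
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/distilbert-base-cased-distilled-squad');
const model = await AutoModelForQuestionAnswering.from_pretrained('Xenova/distilbert-base-cased-distilled-squad');
// Encode the question together with its context
const question = 'Who was Jim Henson?';
const context = 'Jim Henson was a nice puppet.';
const inputs = await tokenizer(question, { text_pair: context });
const { start_logits, end_logits } = await model(inputs);
// Greedy argmax over the span-boundary logits
const argmax = (t) => t.data.indexOf(Math.max(...t.data));
const [start, end] = [argmax(start_logits), argmax(end_logits)];
// Decode the selected token span back to text
const ids = Array.from(inputs.input_ids.data.slice(start, end + 1), Number);
console.log(tokenizer.decode(ids, { skip_special_tokens: true }));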
models.AutoModelForVision2Seq
Helper class which is used to instantiate pretrained vision-to-sequence models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForVision2Seq()
Example
let model = await AutoModelForVision2Seq.from_pretrained('Xenova/vit-gpt2-image-captioning');
models.AutoModelForImageClassification
Helper class which is used to instantiate pretrained image classification models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForImageClassification()
Example
let model = await AutoModelForImageClassification.from_pretrained('Xenova/vit-base-patch16-224');
models.AutoModelForImageSegmentation
Helper class which is used to instantiate pretrained image segmentation models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForImageSegmentation()
Example
let model = await AutoModelForImageSegmentation.from_pretrained('Xenova/detr-resnet-50-panoptic');
models.AutoModelForSemanticSegmentation
Helper class which is used to instantiate pretrained semantic segmentation models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForSemanticSegmentation()
Example
let model = await AutoModelForSemanticSegmentation.from_pretrained('nvidia/segformer-b3-finetuned-cityscapes-1024-1024');
models.AutoModelForUniversalSegmentation
Helper class which is used to instantiate pretrained universal image segmentation models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForUniversalSegmentation()
Example
let model = await AutoModelForUniversalSegmentation.from_pretrained('hf-internal-testing/tiny-random-MaskFormerForInstanceSegmentation');
models.AutoModelForObjectDetection
Helper class which is used to instantiate pretrained object detection models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForObjectDetection()
Example
let model = await AutoModelForObjectDetection.from_pretrained('Xenova/detr-resnet-50');
models.AutoModelForMaskGeneration
Helper class which is used to instantiate pretrained mask generation models with the from_pretrained function.
The chosen model class is determined by the type specified in the model config.
Kind: static class of models
new AutoModelForMaskGeneration()
Example
let model = await AutoModelForMaskGeneration.from_pretrained('Xenova/sam-vit-base');
models.Seq2SeqLMOutput
Kind: static class of models
new Seq2SeqLMOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | The output logits of the model. |
output.past_key_values | Tensor | A tensor of key/value pairs that represent the previous state of the model. |
output.encoder_outputs | Tensor | The output of the encoder in a sequence-to-sequence model. |
[output.decoder_attentions] | Tensor | Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. |
[output.cross_attentions] | Tensor | Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. |
models.SequenceClassifierOutput
Base class for outputs of sentence classification models.
Kind: static class of models
new SequenceClassifierOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Classification (or regression, if config.num_labels==1) scores (before SoftMax). |
models.XVectorOutput
Base class for outputs of XVector models.
Kind: static class of models
new XVectorOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Classification hidden states before AMSoftmax, of shape |
output.embeddings | Tensor | Utterance embeddings used for vector similarity-based retrieval, of shape |
models.TokenClassifierOutput
Base class for outputs of token classification models.
Kind: static class of models
new TokenClassifierOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Classification scores (before SoftMax). |
models.MaskedLMOutput
Base class for masked language models outputs.
Kind: static class of models
new MaskedLMOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
models.QuestionAnsweringModelOutput
Base class for outputs of question answering models.
Kind: static class of models
new QuestionAnsweringModelOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.start_logits | Tensor | Span-start scores (before SoftMax). |
output.end_logits | Tensor | Span-end scores (before SoftMax). |
models.CausalLMOutput
Base class for causal language model (or autoregressive) outputs.
Kind: static class of models
new CausalLMOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Prediction scores of the language modeling head (scores for each vocabulary token before softmax). |
models.CausalLMOutputWithPast
Base class for causal language model (or autoregressive) outputs.
Kind: static class of models
new CausalLMOutputWithPast(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.logits | Tensor | Prediction scores of the language modeling head (scores for each vocabulary token before softmax). |
output.past_key_values | Tensor | Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see past_key_values input) to speed up sequential decoding. |
models.ImageMattingOutput
Kind: static class of models
new ImageMattingOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.alphas | Tensor | Estimated alpha values, of shape |
models.VitsModelOutput
Describes the outputs for the VITS model.
Kind: static class of models
new VitsModelOutput(output)
Param | Type | Description |
---|---|---|
output | Object | The output of the model. |
output.waveform | Tensor | The final audio waveform predicted by the model, of shape |
output.spectrogram | Tensor | The log-mel spectrogram predicted at the output of the flow model. This spectrogram is passed to the Hi-Fi GAN decoder model to obtain the final audio waveform. |
models~cumsum_masked_fill(attention_mask) ⇒ Object
Helper function to perform the following:
x = attention_mask.long().cumsum(-1) - 1
x.masked_fill_(attention_mask == 0, 1)
Kind: inner method of models
Param | Type |
---|---|
attention_mask | Tensor |
models~createPositionIds()
If the model supports providing position_ids, we create position_ids on the fly for batch generation, by computing the cumulative sum of the attention mask along the sequence length dimension.
Equivalent to:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -input_ids.shape[1] :]
Kind: inner method of models
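A standalone JavaScript sketch of the same computation on plain nested arrays (the library versions operate on Tensors; this is illustrative only):
// position_ids = cumsum(mask, -1) - 1, with padded positions filled with 1
function createPositionIdsFromMask(attentionMask) {
  return attentionMask.map(row => {
    let cumsum = 0;
    return row.map(v => {
      cumsum += v;
      return v === 0 ? 1 : cumsum - 1;
    });
  });
}
// [[1, 1, 1, 0, 0]] -> [[0, 1, 2, 1, 1]]
console.log(createPositionIdsFromMask([[1, 1, 1, 0, 0]]));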
models~SamModelInputs : Object
Object containing the model inputs.
Kind: inner typedef of models
Properties
Name | Type | Description |
---|---|---|
pixel_values | Tensor | Pixel values as a Tensor with shape |
[input_points] | Tensor | Input 2D spatial points with shape |
[input_labels] | Tensor | Input labels for the points, as a Tensor of shape |
[input_boxes] | Tensor | Input bounding boxes with shape |
[image_embeddings] | Tensor | Image embeddings used by the mask decoder. |
[image_positional_embeddings] | Tensor | Image positional embeddings used by the mask decoder. |
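A minimal prompted-segmentation sketch using these inputs with SamModel (see AutoModelForMaskGeneration above); the image URL and the single point prompt are illustrative assumptions.
import { SamModel, AutoProcessor, RawImage } from '@huggingface/transformers';
const model = await SamModel.from_pretrained('Xenova/sam-vit-base');
const processor = await AutoProcessor.from_pretrained('Xenova/sam-vit-base');
// A single 2D point prompt on the image
const raw_image = await RawImage.read('https://huggingface.co./ybelkada/segment-anything/resolve/main/assets/car.png');
const input_points = [[[450, 600]]];
const inputs = await processor(raw_image, { input_points });
const outputs = await model(inputs);
// Upscale the predicted masks back to the original image size
const masks = await processor.post_process_masks(outputs.pred_masks, inputs.original_sizes, inputs.reshaped_input_sizes);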
models~SpeechOutput : Object
Kind: inner typedef of models
Properties
Name | Type | Description |
---|---|---|
[spectrogram] | Tensor | The predicted log-mel spectrogram of shape |
[waveform] | Tensor | The predicted waveform of shape |
[cross_attentions] | Tensor | The outputs of the decoder's cross-attention layers of shape |