
Experimental 20B instruction-tuned model based on gpt-neox-20b.

Example usage:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

name = "jordiclive/instruction-tuned-gpt-neox-20b"

# Load in fp16 and shard across available devices; pass load_in_8bit=True instead for 8-bit loading.
model = AutoModelForCausalLM.from_pretrained(
    name, device_map="auto", torch_dtype=torch.float16
)
tokenizer = AutoTokenizer.from_pretrained(name)


def generate_from_model(model, tokenizer, text):
    encoded_input = tokenizer(text, return_tensors="pt")
    output_sequences = model.generate(
        input_ids=encoded_input["input_ids"].cuda(0),
        do_sample=True,
        max_new_tokens=35,
        num_return_sequences=1,
        top_p=0.95,
        temperature=0.5,
        penalty_alpha=0.6,
        top_k=4,
        output_scores=True,
        return_dict_in_generate=True,
        repetition_penalty=1.03,
        eos_token_id=0,
        use_cache=True,
    )
    # Strip the prompt tokens and decode only the newly generated text.
    gen_sequences = output_sequences.sequences[:, encoded_input["input_ids"].shape[-1]:]
    for sequence in gen_sequences:
        print(tokenizer.decode(sequence, skip_special_tokens=True))


text = "User: Will big tech A.I be adulterated with advertisement?\n\nOA:"
generate_from_model(model, tokenizer, text)
```
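
If GPU memory is limited, the model can also be loaded in 8-bit, as hinted by the commented-out `load_in_8bit` flag above. A minimal sketch, assuming `bitsandbytes` and `accelerate` are installed alongside `transformers`:

```python
from transformers import AutoModelForCausalLM

# Optional: 8-bit loading to reduce memory use (requires bitsandbytes and accelerate).
model_8bit = AutoModelForCausalLM.from_pretrained(
    "jordiclive/instruction-tuned-gpt-neox-20b",
    device_map="auto",
    load_in_8bit=True,
)
```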