sc2582 committed on
Commit
ac69c70
·
verified ·
1 Parent(s): 5a075e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -3,16 +3,16 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import gradio as gr
4
 
5
  # Adjust this to your model ID
6
- model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
7
 
8
- peft_model_id = "decision-oaif/Meta-Llama-3-8B-Instruct-sft-intercode-bash-iter0"
9
  # Load model with device map and dtype
10
  model = AutoModelForCausalLM.from_pretrained(
11
  model_id,
12
  torch_dtype=torch.bfloat16,
13
  device_map="auto"
14
  )
15
- model.load_adapter(peft_model_id)
16
 
17
  # Load tokenizer and set truncation and padding
18
  tokenizer = AutoTokenizer.from_pretrained(model_id, truncation=True, padding=True)
 
3
  import gradio as gr
4
 
5
  # Adjust this to your model ID
6
+ model_id = "decision-oaif/Meta-Llama-3-8B-Instruct-sft-intercode-bash-iter1"
7
 
8
+ #peft_model_id = "decision-oaif/Meta-Llama-3-8B-Instruct-sft-intercode-bash-iter0"
9
  # Load model with device map and dtype
10
  model = AutoModelForCausalLM.from_pretrained(
11
  model_id,
12
  torch_dtype=torch.bfloat16,
13
  device_map="auto"
14
  )
15
+ #model.load_adapter(peft_model_id)
16
 
17
  # Load tokenizer and set truncation and padding
18
  tokenizer = AutoTokenizer.from_pretrained(model_id, truncation=True, padding=True)