AthuKawale committed
Commit 60870dc · 1 Parent(s): 55b5608

removed peft

Files changed (1):
  app.py +22 -19
app.py CHANGED
@@ -1,4 +1,3 @@
- from peft import PeftModel, PeftConfig
  import torch
  from datasets import load_dataset
@@ -8,28 +7,32 @@ print("executed successfully")
  dataset_name = "timdettmers/openassistant-guanaco"
  dataset = load_dataset(dataset_name, split="train")

- import os

- ld_library_path = os.environ.get("LD_LIBRARY_PATH")
- print(ld_library_path)

- # The directory to search in
- search_directory = '/usr/'

- # The filename pattern to look for (wildcard * is used)
- filename_pattern = 'libcuda.so.*'

- # Initialize a list to store the paths of matching files
- matching_files = []

- # Walk through the directory and its subdirectories
- for root, dirs, files in os.walk(search_directory):
-     for file in files:
-         if file.startswith('libcuda.so.'):
-             matching_files.append(os.path.join(root, file))
-
- # Print the paths of matching files
- for file in matching_files:
-     print(file)
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

+ # quantization configuration
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.float16,
+ )

+ # download model
+ model_name = "TinyPixel/Llama-2-7B-bf16-sharded"
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     quantization_config=bnb_config,
+     trust_remote_code=True
+ )
+ model.config.use_cache = False

+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+ tokenizer.pad_token = tokenizer.eos_token

+ text = "What is a large language model?"
+ device = "cuda:0"
+ inputs = tokenizer(text, return_tensors="pt").to(device)
+ outputs = model.generate(**inputs, max_new_tokens=50)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
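The added code loads the model in 4-bit NF4 via bitsandbytes. A quick sanity check for the quantized load (a sketch, not part of the commit; get_memory_footprint is a standard transformers method, and the ~4 GB figure is an estimate for 7B weights at 4 bits plus overhead):

# After model = AutoModelForCausalLM.from_pretrained(...) as in the diff above:
print(f"{model.get_memory_footprint() / 1e9:.2f} GB")  # roughly 4 GB for a 7B model in 4-bit
print(model.device)  # should report a cuda device, matching the "cuda:0" used for the inputs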